posix_acl.c (24649B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002,2003 by Andreas Gruenbacher <a.gruenbacher@computer.org>
 *
 * Fixes from William Schumacher incorporated on 15 March 2001.
 *    (Reported by Charles Bertsch, <CBertsch@microtest.com>).
 */

/*
 * This file contains generic functions for manipulating
 * POSIX 1003.1e draft standard 17 ACLs.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/mnt_idmapping.h>

static struct posix_acl **acl_by_type(struct inode *inode, int type)
{
	switch (type) {
	case ACL_TYPE_ACCESS:
		return &inode->i_acl;
	case ACL_TYPE_DEFAULT:
		return &inode->i_default_acl;
	default:
		BUG();
	}
}

struct posix_acl *get_cached_acl(struct inode *inode, int type)
{
	struct posix_acl **p = acl_by_type(inode, type);
	struct posix_acl *acl;

	for (;;) {
		rcu_read_lock();
		acl = rcu_dereference(*p);
		if (!acl || is_uncached_acl(acl) ||
		    refcount_inc_not_zero(&acl->a_refcount))
			break;
		rcu_read_unlock();
		cpu_relax();
	}
	rcu_read_unlock();
	return acl;
}
EXPORT_SYMBOL(get_cached_acl);

struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type)
{
	struct posix_acl *acl = rcu_dereference(*acl_by_type(inode, type));

	if (acl == ACL_DONT_CACHE) {
		struct posix_acl *ret;

		ret = inode->i_op->get_acl(inode, type, LOOKUP_RCU);
		if (!IS_ERR(ret))
			acl = ret;
	}

	return acl;
}
EXPORT_SYMBOL(get_cached_acl_rcu);

void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl)
{
	struct posix_acl **p = acl_by_type(inode, type);
	struct posix_acl *old;

	old = xchg(p, posix_acl_dup(acl));
	if (!is_uncached_acl(old))
		posix_acl_release(old);
}
EXPORT_SYMBOL(set_cached_acl);

static void __forget_cached_acl(struct posix_acl **p)
{
	struct posix_acl *old;

	old = xchg(p, ACL_NOT_CACHED);
	if (!is_uncached_acl(old))
		posix_acl_release(old);
}

void forget_cached_acl(struct inode *inode, int type)
{
	__forget_cached_acl(acl_by_type(inode, type));
}
EXPORT_SYMBOL(forget_cached_acl);

void forget_all_cached_acls(struct inode *inode)
{
	__forget_cached_acl(&inode->i_acl);
	__forget_cached_acl(&inode->i_default_acl);
}
EXPORT_SYMBOL(forget_all_cached_acls);

struct posix_acl *get_acl(struct inode *inode, int type)
{
	void *sentinel;
	struct posix_acl **p;
	struct posix_acl *acl;

	/*
	 * The sentinel is used to detect when another operation like
	 * set_cached_acl() or forget_cached_acl() races with get_acl().
	 * It is guaranteed that is_uncached_acl(sentinel) is true.
	 */

	acl = get_cached_acl(inode, type);
	if (!is_uncached_acl(acl))
		return acl;

	if (!IS_POSIXACL(inode))
		return NULL;

	sentinel = uncached_acl_sentinel(current);
	p = acl_by_type(inode, type);

	/*
	 * If the ACL isn't being read yet, set our sentinel.  Otherwise, the
	 * current value of the ACL will not be ACL_NOT_CACHED and so our own
	 * sentinel will not be set; another task will update the cache.  We
	 * could wait for that other task to complete its job, but it's easier
	 * to just call ->get_acl to fetch the ACL ourself.  (This is going to
	 * be an unlikely race.)
	 */
	cmpxchg(p, ACL_NOT_CACHED, sentinel);

	/*
	 * Normally, the ACL returned by ->get_acl will be cached.
	 * A filesystem can prevent that by calling
	 * forget_cached_acl(inode, type) in ->get_acl.
	 *
	 * If the filesystem doesn't have a get_acl() function at all, we'll
	 * just create the negative cache entry.
	 */
	if (!inode->i_op->get_acl) {
		set_cached_acl(inode, type, NULL);
		return NULL;
	}
	acl = inode->i_op->get_acl(inode, type, false);

	if (IS_ERR(acl)) {
		/*
		 * Remove our sentinel so that we don't block future attempts
		 * to cache the ACL.
		 */
		cmpxchg(p, sentinel, ACL_NOT_CACHED);
		return acl;
	}

	/*
	 * Cache the result, but only if our sentinel is still in place.
	 */
	posix_acl_dup(acl);
	if (unlikely(cmpxchg(p, sentinel, acl) != sentinel))
		posix_acl_release(acl);
	return acl;
}
EXPORT_SYMBOL(get_acl);
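/*
 * A minimal sketch of a ->get_acl() instance that opts out of the VFS-level
 * caching above by calling forget_cached_acl(), as the comment in get_acl()
 * describes.  "examplefs_get_acl" and examplefs_read_acl_xattr() are
 * hypothetical; the latter stands in for the filesystem's own on-disk lookup.
 */
struct posix_acl *examplefs_read_acl_xattr(struct inode *inode, int type);

static inline struct posix_acl *examplefs_get_acl(struct inode *inode,
						  int type, bool rcu)
{
	struct posix_acl *acl;

	if (rcu)	/* can't sleep during an RCU walk, let the VFS retry */
		return ERR_PTR(-ECHILD);

	acl = examplefs_read_acl_xattr(inode, type);
	if (!IS_ERR_OR_NULL(acl))
		/* Drop the sentinel so get_acl() won't cache this result. */
		forget_cached_acl(inode, type);
	return acl;
}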
/*
 * Init a fresh posix_acl
 */
void
posix_acl_init(struct posix_acl *acl, int count)
{
	refcount_set(&acl->a_refcount, 1);
	acl->a_count = count;
}
EXPORT_SYMBOL(posix_acl_init);

/*
 * Allocate a new ACL with the specified number of entries.
 */
struct posix_acl *
posix_acl_alloc(int count, gfp_t flags)
{
	const size_t size = sizeof(struct posix_acl) +
	                    count * sizeof(struct posix_acl_entry);
	struct posix_acl *acl = kmalloc(size, flags);
	if (acl)
		posix_acl_init(acl, count);
	return acl;
}
EXPORT_SYMBOL(posix_acl_alloc);

/*
 * Clone an ACL.
 */
static struct posix_acl *
posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
{
	struct posix_acl *clone = NULL;

	if (acl) {
		int size = sizeof(struct posix_acl) + acl->a_count *
		           sizeof(struct posix_acl_entry);
		clone = kmemdup(acl, size, flags);
		if (clone)
			refcount_set(&clone->a_refcount, 1);
	}
	return clone;
}
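/*
 * A minimal sketch, with a hypothetical helper name, of building a four-entry
 * ACL by hand with posix_acl_alloc().  The entries follow the ordering that
 * posix_acl_valid() below enforces: ACL_USER_OBJ, ACL_GROUP_OBJ, ACL_MASK,
 * ACL_OTHER, with named ACL_USER/ACL_GROUP entries (none here) sitting right
 * after their *_OBJ counterparts.
 */
static inline struct posix_acl *example_acl_with_mask(gfp_t flags)
{
	struct posix_acl *acl = posix_acl_alloc(4, flags);

	if (!acl)
		return NULL;

	acl->a_entries[0].e_tag = ACL_USER_OBJ;
	acl->a_entries[0].e_perm = ACL_READ | ACL_WRITE | ACL_EXECUTE;
	acl->a_entries[1].e_tag = ACL_GROUP_OBJ;
	acl->a_entries[1].e_perm = ACL_READ | ACL_EXECUTE;
	acl->a_entries[2].e_tag = ACL_MASK;
	acl->a_entries[2].e_perm = ACL_READ | ACL_EXECUTE;
	acl->a_entries[3].e_tag = ACL_OTHER;
	acl->a_entries[3].e_perm = ACL_READ;
	return acl;
}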
/*
 * Check if an acl is valid. Returns 0 if it is, or -E... otherwise.
 */
int
posix_acl_valid(struct user_namespace *user_ns, const struct posix_acl *acl)
{
	const struct posix_acl_entry *pa, *pe;
	int state = ACL_USER_OBJ;
	int needs_mask = 0;

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		if (pa->e_perm & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
			return -EINVAL;
		switch (pa->e_tag) {
		case ACL_USER_OBJ:
			if (state == ACL_USER_OBJ) {
				state = ACL_USER;
				break;
			}
			return -EINVAL;

		case ACL_USER:
			if (state != ACL_USER)
				return -EINVAL;
			if (!kuid_has_mapping(user_ns, pa->e_uid))
				return -EINVAL;
			needs_mask = 1;
			break;

		case ACL_GROUP_OBJ:
			if (state == ACL_USER) {
				state = ACL_GROUP;
				break;
			}
			return -EINVAL;

		case ACL_GROUP:
			if (state != ACL_GROUP)
				return -EINVAL;
			if (!kgid_has_mapping(user_ns, pa->e_gid))
				return -EINVAL;
			needs_mask = 1;
			break;

		case ACL_MASK:
			if (state != ACL_GROUP)
				return -EINVAL;
			state = ACL_OTHER;
			break;

		case ACL_OTHER:
			if (state == ACL_OTHER ||
			    (state == ACL_GROUP && !needs_mask)) {
				state = 0;
				break;
			}
			return -EINVAL;

		default:
			return -EINVAL;
		}
	}
	if (state == 0)
		return 0;
	return -EINVAL;
}
EXPORT_SYMBOL(posix_acl_valid);

/*
 * Returns 0 if the acl can be exactly represented in the traditional
 * file mode permission bits, or else 1. Returns -E... on error.
 */
int
posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
{
	const struct posix_acl_entry *pa, *pe;
	umode_t mode = 0;
	int not_equiv = 0;

	/*
	 * A null ACL can always be presented as mode bits.
	 */
	if (!acl)
		return 0;

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch (pa->e_tag) {
		case ACL_USER_OBJ:
			mode |= (pa->e_perm & S_IRWXO) << 6;
			break;
		case ACL_GROUP_OBJ:
			mode |= (pa->e_perm & S_IRWXO) << 3;
			break;
		case ACL_OTHER:
			mode |= pa->e_perm & S_IRWXO;
			break;
		case ACL_MASK:
			mode = (mode & ~S_IRWXG) |
			       ((pa->e_perm & S_IRWXO) << 3);
			not_equiv = 1;
			break;
		case ACL_USER:
		case ACL_GROUP:
			not_equiv = 1;
			break;
		default:
			return -EINVAL;
		}
	}
	if (mode_p)
		*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
	return not_equiv;
}
EXPORT_SYMBOL(posix_acl_equiv_mode);

/*
 * Create an ACL representing the file mode permission bits of an inode.
 */
struct posix_acl *
posix_acl_from_mode(umode_t mode, gfp_t flags)
{
	struct posix_acl *acl = posix_acl_alloc(3, flags);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	acl->a_entries[0].e_tag = ACL_USER_OBJ;
	acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6;

	acl->a_entries[1].e_tag = ACL_GROUP_OBJ;
	acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3;

	acl->a_entries[2].e_tag = ACL_OTHER;
	acl->a_entries[2].e_perm = (mode & S_IRWXO);
	return acl;
}
EXPORT_SYMBOL(posix_acl_from_mode);
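/*
 * A minimal sketch of the round trip between mode bits and a three-entry
 * ACL: posix_acl_from_mode() above builds the ACL and posix_acl_equiv_mode()
 * reports 0 because no named users, named groups or mask entry are present,
 * writing mode & S_IRWXUGO back into "out".  The helper name is hypothetical.
 */
static inline int example_mode_acl_roundtrip(umode_t mode)
{
	struct posix_acl *acl = posix_acl_from_mode(mode, GFP_KERNEL);
	umode_t out = 0;
	int equiv;

	if (IS_ERR(acl))
		return PTR_ERR(acl);
	equiv = posix_acl_equiv_mode(acl, &out);	/* 0: exactly equivalent */
	posix_acl_release(acl);
	return equiv;
}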
/*
 * Return 0 if current is granted the requested access ("want") to the inode
 * by the acl. Returns -E... otherwise.
 */
int
posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     const struct posix_acl *acl, int want)
{
	const struct posix_acl_entry *pa, *pe, *mask_obj;
	int found = 0;
	kuid_t uid;
	kgid_t gid;

	want &= MAY_READ | MAY_WRITE | MAY_EXEC;

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch(pa->e_tag) {
		case ACL_USER_OBJ:
			/* (May have been checked already) */
			uid = i_uid_into_mnt(mnt_userns, inode);
			if (uid_eq(uid, current_fsuid()))
				goto check_perm;
			break;
		case ACL_USER:
			uid = mapped_kuid_fs(mnt_userns,
					     i_user_ns(inode),
					     pa->e_uid);
			if (uid_eq(uid, current_fsuid()))
				goto mask;
			break;
		case ACL_GROUP_OBJ:
			gid = i_gid_into_mnt(mnt_userns, inode);
			if (in_group_p(gid)) {
				found = 1;
				if ((pa->e_perm & want) == want)
					goto mask;
			}
			break;
		case ACL_GROUP:
			gid = mapped_kgid_fs(mnt_userns,
					     i_user_ns(inode),
					     pa->e_gid);
			if (in_group_p(gid)) {
				found = 1;
				if ((pa->e_perm & want) == want)
					goto mask;
			}
			break;
		case ACL_MASK:
			break;
		case ACL_OTHER:
			if (found)
				return -EACCES;
			else
				goto check_perm;
		default:
			return -EIO;
		}
	}
	return -EIO;

mask:
	for (mask_obj = pa+1; mask_obj != pe; mask_obj++) {
		if (mask_obj->e_tag == ACL_MASK) {
			if ((pa->e_perm & mask_obj->e_perm & want) == want)
				return 0;
			return -EACCES;
		}
	}

check_perm:
	if ((pa->e_perm & want) == want)
		return 0;
	return -EACCES;
}

/*
 * Modify acl when creating a new inode. The caller must ensure the acl is
 * only referenced once.
 *
 * mode_p initially must contain the mode parameter to the open() / creat()
 * system calls. All permissions that are not granted by the acl are removed.
 * The permissions in the acl are changed to reflect the mode_p parameter.
 */
static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
{
	struct posix_acl_entry *pa, *pe;
	struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
	umode_t mode = *mode_p;
	int not_equiv = 0;

	/* assert(atomic_read(acl->a_refcount) == 1); */

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch(pa->e_tag) {
		case ACL_USER_OBJ:
			pa->e_perm &= (mode >> 6) | ~S_IRWXO;
			mode &= (pa->e_perm << 6) | ~S_IRWXU;
			break;

		case ACL_USER:
		case ACL_GROUP:
			not_equiv = 1;
			break;

		case ACL_GROUP_OBJ:
			group_obj = pa;
			break;

		case ACL_OTHER:
			pa->e_perm &= mode | ~S_IRWXO;
			mode &= pa->e_perm | ~S_IRWXO;
			break;

		case ACL_MASK:
			mask_obj = pa;
			not_equiv = 1;
			break;

		default:
			return -EIO;
		}
	}

	if (mask_obj) {
		mask_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
		mode &= (mask_obj->e_perm << 3) | ~S_IRWXG;
	} else {
		if (!group_obj)
			return -EIO;
		group_obj->e_perm &= (mode >> 3) | ~S_IRWXO;
		mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
	}

	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
	return not_equiv;
}
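/*
 * For example, creating a file with mode 0644 in a directory whose default
 * ACL is "user::rwx user:joe:rwx group::r-x mask::rwx other::r-x" masks the
 * inherited entries down to "user::rw- user:joe:rwx group::r-x mask::r--
 * other::r--" and leaves *mode_p at 0644; joe's effective access is limited
 * to "r--" by the mask, and the return value is 1 because the result cannot
 * be expressed with mode bits alone.
 */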
/*
 * Modify the ACL for the chmod syscall.
 */
static int __posix_acl_chmod_masq(struct posix_acl *acl, umode_t mode)
{
	struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
	struct posix_acl_entry *pa, *pe;

	/* assert(atomic_read(acl->a_refcount) == 1); */

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch(pa->e_tag) {
		case ACL_USER_OBJ:
			pa->e_perm = (mode & S_IRWXU) >> 6;
			break;

		case ACL_USER:
		case ACL_GROUP:
			break;

		case ACL_GROUP_OBJ:
			group_obj = pa;
			break;

		case ACL_MASK:
			mask_obj = pa;
			break;

		case ACL_OTHER:
			pa->e_perm = (mode & S_IRWXO);
			break;

		default:
			return -EIO;
		}
	}

	if (mask_obj) {
		mask_obj->e_perm = (mode & S_IRWXG) >> 3;
	} else {
		if (!group_obj)
			return -EIO;
		group_obj->e_perm = (mode & S_IRWXG) >> 3;
	}

	return 0;
}

int
__posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
{
	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
	int err = -ENOMEM;
	if (clone) {
		err = posix_acl_create_masq(clone, mode_p);
		if (err < 0) {
			posix_acl_release(clone);
			clone = NULL;
		}
	}
	posix_acl_release(*acl);
	*acl = clone;
	return err;
}
EXPORT_SYMBOL(__posix_acl_create);

int
__posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
{
	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
	int err = -ENOMEM;
	if (clone) {
		err = __posix_acl_chmod_masq(clone, mode);
		if (err) {
			posix_acl_release(clone);
			clone = NULL;
		}
	}
	posix_acl_release(*acl);
	*acl = clone;
	return err;
}
EXPORT_SYMBOL(__posix_acl_chmod);

/**
 * posix_acl_chmod - chmod a posix acl
 *
 * @mnt_userns: user namespace of the mount @inode was found from
 * @inode: inode to check permissions on
 * @mode: the new mode of @inode
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before checking
 * permissions. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass init_user_ns.
 */
int
posix_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode,
		umode_t mode)
{
	struct posix_acl *acl;
	int ret = 0;

	if (!IS_POSIXACL(inode))
		return 0;
	if (!inode->i_op->set_acl)
		return -EOPNOTSUPP;

	acl = get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR_OR_NULL(acl)) {
		if (acl == ERR_PTR(-EOPNOTSUPP))
			return 0;
		return PTR_ERR(acl);
	}

	ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
	if (ret)
		return ret;
	ret = inode->i_op->set_acl(mnt_userns, inode, acl, ACL_TYPE_ACCESS);
	posix_acl_release(acl);
	return ret;
}
EXPORT_SYMBOL(posix_acl_chmod);
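/*
 * A minimal sketch of the usual caller: a ->setattr() instance that updates
 * the mode and then rewrites the access ACL via posix_acl_chmod().
 * "examplefs_setattr" is hypothetical; setattr_prepare()/setattr_copy() are
 * the generic helpers from fs/attr.c.
 */
static inline int examplefs_setattr(struct user_namespace *mnt_userns,
				    struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(mnt_userns, dentry, attr);
	if (error)
		return error;

	setattr_copy(mnt_userns, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
	mark_inode_dirty(inode);
	return error;
}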
int
posix_acl_create(struct inode *dir, umode_t *mode,
		 struct posix_acl **default_acl, struct posix_acl **acl)
{
	struct posix_acl *p;
	struct posix_acl *clone;
	int ret;

	*acl = NULL;
	*default_acl = NULL;

	if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
		return 0;

	p = get_acl(dir, ACL_TYPE_DEFAULT);
	if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
		*mode &= ~current_umask();
		return 0;
	}
	if (IS_ERR(p))
		return PTR_ERR(p);

	ret = -ENOMEM;
	clone = posix_acl_clone(p, GFP_NOFS);
	if (!clone)
		goto err_release;

	ret = posix_acl_create_masq(clone, mode);
	if (ret < 0)
		goto err_release_clone;

	if (ret == 0)
		posix_acl_release(clone);
	else
		*acl = clone;

	if (!S_ISDIR(*mode))
		posix_acl_release(p);
	else
		*default_acl = p;

	return 0;

err_release_clone:
	posix_acl_release(clone);
err_release:
	posix_acl_release(p);
	return ret;
}
EXPORT_SYMBOL_GPL(posix_acl_create);
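/*
 * A minimal sketch of the create-time pattern built on posix_acl_create():
 * take the (possibly masked) mode, apply whatever ACLs come back to the new
 * inode, and drop the references.  "example_init_acl" is hypothetical, and a
 * real filesystem would normally call its own ACL-setting helper instead of
 * the permission-checking set_posix_acl() used here for brevity.
 */
static inline int example_init_acl(struct user_namespace *mnt_userns,
				   struct inode *inode, struct inode *dir,
				   umode_t *mode)
{
	struct posix_acl *default_acl, *acl;
	int error;

	error = posix_acl_create(dir, mode, &default_acl, &acl);
	if (error)
		return error;

	if (default_acl) {
		error = set_posix_acl(mnt_userns, inode, ACL_TYPE_DEFAULT,
				      default_acl);
		posix_acl_release(default_acl);
	}
	if (acl) {
		if (!error)
			error = set_posix_acl(mnt_userns, inode,
					      ACL_TYPE_ACCESS, acl);
		posix_acl_release(acl);
	}
	return error;
}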
/**
 * posix_acl_update_mode - update mode in set_acl
 * @mnt_userns: user namespace of the mount @inode was found from
 * @inode: target inode
 * @mode_p: mode (pointer) for update
 * @acl: acl pointer
 *
 * Update the file mode when setting an ACL: compute the new file permission
 * bits based on the ACL. In addition, if the ACL is equivalent to the new
 * file mode, set *@acl to NULL to indicate that no ACL should be set.
 *
 * As with chmod, clear the setgid bit if the caller is not in the owning group
 * or capable of CAP_FSETID (see inode_change_ok).
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before checking
 * permissions. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass init_user_ns.
 *
 * Called from set_acl inode operations.
 */
int posix_acl_update_mode(struct user_namespace *mnt_userns,
			  struct inode *inode, umode_t *mode_p,
			  struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	error = posix_acl_equiv_mode(*acl, &mode);
	if (error < 0)
		return error;
	if (error == 0)
		*acl = NULL;
	if (!in_group_p(i_gid_into_mnt(mnt_userns, inode)) &&
	    !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
		mode &= ~S_ISGID;
	*mode_p = mode;
	return 0;
}
EXPORT_SYMBOL(posix_acl_update_mode);

/*
 * Fix up the uids and gids in posix acl extended attributes in place.
 */
static void posix_acl_fix_xattr_userns(
	struct user_namespace *to, struct user_namespace *from,
	struct user_namespace *mnt_userns,
	void *value, size_t size, bool from_user)
{
	struct posix_acl_xattr_header *header = value;
	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
	int count;
	kuid_t uid;
	kgid_t gid;

	if (!value)
		return;
	if (size < sizeof(struct posix_acl_xattr_header))
		return;
	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
		return;

	count = posix_acl_xattr_count(size);
	if (count < 0)
		return;
	if (count == 0)
		return;

	for (end = entry + count; entry != end; entry++) {
		switch(le16_to_cpu(entry->e_tag)) {
		case ACL_USER:
			uid = make_kuid(from, le32_to_cpu(entry->e_id));
			if (from_user)
				uid = mapped_kuid_user(mnt_userns, &init_user_ns, uid);
			else
				uid = mapped_kuid_fs(mnt_userns, &init_user_ns, uid);
			entry->e_id = cpu_to_le32(from_kuid(to, uid));
			break;
		case ACL_GROUP:
			gid = make_kgid(from, le32_to_cpu(entry->e_id));
			if (from_user)
				gid = mapped_kgid_user(mnt_userns, &init_user_ns, gid);
			else
				gid = mapped_kgid_fs(mnt_userns, &init_user_ns, gid);
			entry->e_id = cpu_to_le32(from_kgid(to, gid));
			break;
		default:
			break;
		}
	}
}

void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
				   struct inode *inode,
				   void *value, size_t size)
{
	struct user_namespace *user_ns = current_user_ns();

	/* Leave ids untouched on non-idmapped mounts. */
	if (no_idmapping(mnt_userns, i_user_ns(inode)))
		mnt_userns = &init_user_ns;
	if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
		return;
	posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
				   size, true);
}

void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
				 struct inode *inode,
				 void *value, size_t size)
{
	struct user_namespace *user_ns = current_user_ns();

	/* Leave ids untouched on non-idmapped mounts. */
	if (no_idmapping(mnt_userns, i_user_ns(inode)))
		mnt_userns = &init_user_ns;
	if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
		return;
	posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
				   size, false);
}
/*
 * Convert from extended attribute to in-memory representation.
 */
struct posix_acl *
posix_acl_from_xattr(struct user_namespace *user_ns,
		     const void *value, size_t size)
{
	const struct posix_acl_xattr_header *header = value;
	const struct posix_acl_xattr_entry *entry = (const void *)(header + 1), *end;
	int count;
	struct posix_acl *acl;
	struct posix_acl_entry *acl_e;

	if (!value)
		return NULL;
	if (size < sizeof(struct posix_acl_xattr_header))
		return ERR_PTR(-EINVAL);
	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
		return ERR_PTR(-EOPNOTSUPP);

	count = posix_acl_xattr_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;

	acl = posix_acl_alloc(count, GFP_NOFS);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	acl_e = acl->a_entries;

	for (end = entry + count; entry != end; acl_e++, entry++) {
		acl_e->e_tag = le16_to_cpu(entry->e_tag);
		acl_e->e_perm = le16_to_cpu(entry->e_perm);

		switch(acl_e->e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			break;

		case ACL_USER:
			acl_e->e_uid =
				make_kuid(user_ns,
					  le32_to_cpu(entry->e_id));
			if (!uid_valid(acl_e->e_uid))
				goto fail;
			break;
		case ACL_GROUP:
			acl_e->e_gid =
				make_kgid(user_ns,
					  le32_to_cpu(entry->e_id));
			if (!gid_valid(acl_e->e_gid))
				goto fail;
			break;

		default:
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL (posix_acl_from_xattr);

/*
 * Convert from in-memory to extended attribute representation.
 */
int
posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
		   void *buffer, size_t size)
{
	struct posix_acl_xattr_header *ext_acl = buffer;
	struct posix_acl_xattr_entry *ext_entry;
	int real_size, n;

	real_size = posix_acl_xattr_size(acl->a_count);
	if (!buffer)
		return real_size;
	if (real_size > size)
		return -ERANGE;

	ext_entry = (void *)(ext_acl + 1);
	ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);

	for (n=0; n < acl->a_count; n++, ext_entry++) {
		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
		ext_entry->e_tag = cpu_to_le16(acl_e->e_tag);
		ext_entry->e_perm = cpu_to_le16(acl_e->e_perm);
		switch(acl_e->e_tag) {
		case ACL_USER:
			ext_entry->e_id =
				cpu_to_le32(from_kuid(user_ns, acl_e->e_uid));
			break;
		case ACL_GROUP:
			ext_entry->e_id =
				cpu_to_le32(from_kgid(user_ns, acl_e->e_gid));
			break;
		default:
			ext_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
			break;
		}
	}
	return real_size;
}
EXPORT_SYMBOL (posix_acl_to_xattr);
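/*
 * A minimal sketch of the usual two-call pattern around posix_acl_to_xattr():
 * probe with a NULL buffer to learn the size, then convert for real.  The
 * helper name is hypothetical, "acl" must be non-NULL, and only functions
 * from this file plus kmalloc()/kfree() are used.
 */
static inline void *example_acl_to_xattr_buf(const struct posix_acl *acl,
					     size_t *sizep)
{
	void *buf;
	int size;

	size = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
	if (size < 0)
		return ERR_PTR(size);

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	size = posix_acl_to_xattr(&init_user_ns, acl, buf, size);
	if (size < 0) {
		kfree(buf);
		return ERR_PTR(size);
	}
	*sizep = size;
	return buf;
}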
static int
posix_acl_xattr_get(const struct xattr_handler *handler,
		    struct dentry *unused, struct inode *inode,
		    const char *name, void *value, size_t size)
{
	struct posix_acl *acl;
	int error;

	if (!IS_POSIXACL(inode))
		return -EOPNOTSUPP;
	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	acl = get_acl(inode, handler->flags);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;

	error = posix_acl_to_xattr(&init_user_ns, acl, value, size);
	posix_acl_release(acl);

	return error;
}

int
set_posix_acl(struct user_namespace *mnt_userns, struct inode *inode,
	      int type, struct posix_acl *acl)
{
	if (!IS_POSIXACL(inode))
		return -EOPNOTSUPP;
	if (!inode->i_op->set_acl)
		return -EOPNOTSUPP;

	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
		return acl ? -EACCES : 0;
	if (!inode_owner_or_capable(mnt_userns, inode))
		return -EPERM;

	if (acl) {
		int ret = posix_acl_valid(inode->i_sb->s_user_ns, acl);
		if (ret)
			return ret;
	}
	return inode->i_op->set_acl(mnt_userns, inode, acl, type);
}
EXPORT_SYMBOL(set_posix_acl);

static int
posix_acl_xattr_set(const struct xattr_handler *handler,
		    struct user_namespace *mnt_userns,
		    struct dentry *unused, struct inode *inode,
		    const char *name, const void *value, size_t size,
		    int flags)
{
	struct posix_acl *acl = NULL;
	int ret;

	if (value) {
		acl = posix_acl_from_xattr(&init_user_ns, value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
	}
	ret = set_posix_acl(mnt_userns, inode, handler->flags, acl);
	posix_acl_release(acl);
	return ret;
}

static bool
posix_acl_xattr_list(struct dentry *dentry)
{
	return IS_POSIXACL(d_backing_inode(dentry));
}

const struct xattr_handler posix_acl_access_xattr_handler = {
	.name = XATTR_NAME_POSIX_ACL_ACCESS,
	.flags = ACL_TYPE_ACCESS,
	.list = posix_acl_xattr_list,
	.get = posix_acl_xattr_get,
	.set = posix_acl_xattr_set,
};
EXPORT_SYMBOL_GPL(posix_acl_access_xattr_handler);

const struct xattr_handler posix_acl_default_xattr_handler = {
	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
	.flags = ACL_TYPE_DEFAULT,
	.list = posix_acl_xattr_list,
	.get = posix_acl_xattr_get,
	.set = posix_acl_xattr_set,
};
EXPORT_SYMBOL_GPL(posix_acl_default_xattr_handler);

int simple_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
		   struct posix_acl *acl, int type)
{
	int error;

	if (type == ACL_TYPE_ACCESS) {
		error = posix_acl_update_mode(mnt_userns, inode,
					      &inode->i_mode, &acl);
		if (error)
			return error;
	}

	inode->i_ctime = current_time(inode);
	set_cached_acl(inode, type, acl);
	return 0;
}

int simple_acl_create(struct inode *dir, struct inode *inode)
{
	struct posix_acl *default_acl, *acl;
	int error;

	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
	if (error)
		return error;

	set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl);
	set_cached_acl(inode, ACL_TYPE_ACCESS, acl);

	if (default_acl)
		posix_acl_release(default_acl);
	if (acl)
		posix_acl_release(acl);
	return 0;
}
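/*
 * A minimal sketch of how an ACL-aware in-memory filesystem wires the pieces
 * above together (compare mm/shmem.c); the "examplefs" name is hypothetical.
 * The handler array goes into sb->s_xattr so that getxattr/setxattr reach
 * posix_acl_xattr_get()/posix_acl_xattr_set(), ->set_acl points at
 * simple_set_acl(), and the create path calls simple_acl_create(dir, inode)
 * to inherit default ACLs.
 */
static const struct xattr_handler *examplefs_xattr_handlers[] = {
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	NULL
};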