shm.c (45846B)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
        struct kern_ipc_perm    shm_perm;
        struct file             *shm_file;
        unsigned long           shm_nattch;
        unsigned long           shm_segsz;
        time64_t                shm_atim;
        time64_t                shm_dtim;
        time64_t                shm_ctim;
        struct pid              *shm_cprid;
        struct pid              *shm_lprid;
        struct ucounts          *mlock_ucounts;

        /*
         * The task that created the shm object; used for
         * task_lock(shp->shm_creator).
         */
        struct task_struct      *shm_creator;

        /*
         * List by creator. task_lock(->shm_creator) required for read/write.
         * If list_empty(), then the creator is dead already.
         */
        struct list_head        shm_clist;
        struct ipc_namespace    *ns;
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST        01000   /* segment will be destroyed on last detach */
#define SHM_LOCKED      02000   /* segment will not be swapped */

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        WARN_ON(ns != shp->ns);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
        rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp;

        rcu_read_lock();
        ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
        if (IS_ERR(ipcp))
                goto err;

        ipc_lock_object(ipcp);
        /*
         * ipc_rmid() may have already freed the ID while ipc_lock_object()
         * was spinning: here verify that the structure is still valid.
         * Upon races with RMID, return -EIDRM, thus indicating that
         * the ID points to a removed identifier.
         */
        if (ipc_valid_object(ipcp)) {
                /* return a locked ipc object upon success */
                return container_of(ipcp, struct shmid_kernel, shm_perm);
        }

        ipc_unlock_object(ipcp);
        ipcp = ERR_PTR(-EIDRM);
err:
        rcu_read_unlock();
        /*
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
        return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
        struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
                                                        rcu);
        struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
                                                        shm_perm);
        security_shm_free(&shp->shm_perm);
        kfree(shp);
}

/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid()
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
        struct task_struct *creator;

        /* ensure that shm_creator does not disappear */
        rcu_read_lock();

        /*
         * A concurrent exit_shm may do a list_del_init() as well.
         * Just do nothing if exit_shm already did the work
         */
        if (!list_empty(&shp->shm_clist)) {
                /*
                 * shp->shm_creator is guaranteed to be valid *only*
                 * if shp->shm_clist is not empty.
                 */
                creator = shp->shm_creator;

                task_lock(creator);
                /*
                 * list_del_init() is a nop if the entry was already removed
                 * from the list.
                 */
                list_del_init(&shp->shm_clist);
                task_unlock(creator);
        }
        rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
        shm_clist_rm(s);
        ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);

        if (IS_ERR(shp))
                return PTR_ERR(shp);

        if (shp->shm_file != sfd->file) {
                /* ID was reused */
                shm_unlock(shp);
                return -EINVAL;
        }

        shp->shm_atim = ktime_get_real_seconds();
        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_nattch++;
        shm_unlock(shp);
        return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        int err = __shm_open(vma);
        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        struct file *shm_file;

        shm_file = shp->shm_file;
        shp->shm_file = NULL;
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shm_file))
                shmem_lock(shm_file, 0, shp->mlock_ucounts);
        fput(shm_file);
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (shp->ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);

        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
         */
        if (WARN_ON_ONCE(IS_ERR(shp)))
                goto done; /* no-op */

        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_dtim = ktime_get_real_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
done:
        up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
        if (!list_empty(&shp->shm_clist))
                return 0;

        if (shm_may_destroy(shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
        for (;;) {
                struct shmid_kernel *shp;
                struct ipc_namespace *ns;

                task_lock(task);

                if (list_empty(&task->sysvshm.shm_clist)) {
                        task_unlock(task);
                        break;
                }

                shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
                                shm_clist);

                /*
                 * 1) Get pointer to the ipc namespace. It is worth noting
                 * that this pointer is guaranteed to be valid because the
                 * lifetime of shp is always shorter than that of the
                 * namespace in which shp lives.
                 * Since we have taken task_lock(), shp won't be freed.
                 */
                ns = shp->ns;

                /*
                 * 2) If kernel.shm_rmid_forced is not set then only keep track of
                 * which shmids are orphaned, so that a later set of the sysctl
                 * can clean them up.
                 */
                if (!ns->shm_rmid_forced)
                        goto unlink_continue;

                /*
                 * 3) get a reference to the namespace.
                 *    The refcount could already be 0. If it is 0, then
                 *    the shm objects will be freed by free_ipc_work().
                 */
                ns = get_ipc_ns_not_zero(ns);
                if (!ns) {
unlink_continue:
                        list_del_init(&shp->shm_clist);
                        task_unlock(task);
                        continue;
                }

                /*
                 * 4) get a reference to shp.
                 *    This cannot fail: shm_clist_rm() is called before
                 *    ipc_rmid(), thus the refcount cannot be 0.
                 */
                WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

                /*
                 * 5) unlink the shm segment from the list of segments
                 *    created by current.
                 *    This must be done last. After unlinking,
                 *    only the refcounts obtained above prevent IPC_RMID
                 *    from destroying the segment or the namespace.
                 */
                list_del_init(&shp->shm_clist);

                task_unlock(task);

                /*
                 * 6) we have all references
                 *    Thus lock & if needed destroy shp.
                 */
                down_write(&shm_ids(ns).rwsem);
                shm_lock_by_ptr(shp);
                /*
                 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
                 * safe to call ipc_rcu_putref here
                 */
                ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

                if (ipc_valid_object(&shp->shm_perm)) {
                        if (shm_may_destroy(shp))
                                shm_destroy(ns, shp);
                        else
                                shm_unlock(shp);
                } else {
                        /*
                         * Someone else deleted the shp from namespace
                         * idr/kht while we have waited.
                         * Just unlock and continue.
                         */
                        shm_unlock(shp);
                }

                up_write(&shm_ids(ns).rwsem);
                put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
        }
}

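/*
 * Illustration (not from the original file): the orphan handling above is
 * what backs the kernel.shm_rmid_forced sysctl. A minimal sketch of the
 * userspace-visible behaviour, assuming a root shell:
 *
 *      # sysctl kernel.shm_rmid_forced=1
 *      # ipcs -m        <- unattached segments whose creators have exited
 *                          are now gone
 *
 * With the sysctl set, exit_shm() destroys each orphaned, unattached
 * segment as its creator exits; enabling the sysctl later triggers
 * shm_destroy_orphaned() on the whole namespace instead.
 */
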
static vm_fault_t shm_fault(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vmf);
}

static int shm_may_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->may_split)
                return sfd->vm_ops->may_split(vma, addr);

        return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        if (sfd->vm_ops->pagesize)
                return sfd->vm_ops->pagesize(vma);

        return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        /*
         * In case of remap_file_pages() emulation, the file can represent an
         * IPC ID that was removed, and possibly even reused by another shm
         * segment already. Propagate this case as an error to caller.
         */
        ret = __shm_open(vma);
        if (ret)
                return ret;

        ret = call_mmap(sfd->file, vma);
        if (ret) {
                shm_close(vma);
                return ret;
        }
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        WARN_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        fput(sfd->file);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
        .llseek         = noop_llseek,
        .fallocate      = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
        .may_split = shm_may_split,
        .pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (numpages << PAGE_SHIFT < size)
                return -ENOSPC;

        if (ns->shm_tot + numpages < ns->shm_tot ||
                        ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = kmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
        if (unlikely(!shp))
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_ucounts = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(&shp->shm_perm);
        if (error) {
                kfree(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        shp->shm_cprid = get_pid(task_tgid(current));
        shp->shm_lprid = NULL;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = ktime_get_real_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        /* ipc_addid() locks shp upon success. */
        error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (error < 0)
                goto no_id;

        shp->ns = ns;

        task_lock(current);
        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
        task_unlock(current);

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        ipc_update_pid(&shp->shm_cprid, NULL);
        ipc_update_pid(&shp->shm_lprid, NULL);
        fput(file);
        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
        return error;
no_file:
        call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
        return error;
}

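/*
 * Worked example for the size accounting in newseg() (illustrative, not
 * part of the original source; assumes PAGE_SIZE == 4096):
 *
 *      shmget(key, 10000, ...)  =>  numpages = (10000 + 4095) >> 12 = 3
 *
 * i.e. the segment is charged 3 pages (12288 bytes) against
 * ns->shm_ctlall, and the same rounded-up figure is subtracted from
 * ns->shm_tot in shm_destroy(). The "numpages << PAGE_SHIFT < size"
 * test rejects sizes so large that the page count computation wrapped.
 */
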
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops shm_ops = {
                .getnew = newseg,
                .associate = security_shm_associate,
                .more_checks = shm_more_checks,
        };
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        return ksys_shmget(key, size, shmflg);
}

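/*
 * Userspace view of the syscall above (illustrative sketch, not part of
 * the original source):
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 * A non-negative return value is the segment ID handed out by
 * ipc_addid(); -1 with errno set maps back to the error returns in
 * newseg() and shm_more_checks() (e.g. EINVAL for size < SHMMIN,
 * ENOSPC when shm_ctlall would be exceeded).
 */
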
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);

                spin_lock_irq(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock_irq(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid64_ds *shmid64)
{
        struct kern_ipc_perm *ipcp;
        struct shmid_kernel *shp;
        int err;

        down_write(&shm_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
                                      &shmid64->shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&shp->shm_perm);
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_lock_object(&shp->shm_perm);
                err = ipc_update_perm(&shmid64->shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = ktime_get_real_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rwsem);
        return err;
}

static int shmctl_ipc_info(struct ipc_namespace *ns,
                           struct shminfo64 *shminfo)
{
        int err = security_shm_shmctl(NULL, IPC_INFO);
        if (!err) {
                memset(shminfo, 0, sizeof(*shminfo));
                shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
                shminfo->shmmax = ns->shm_ctlmax;
                shminfo->shmall = ns->shm_ctlall;
                shminfo->shmmin = SHMMIN;
                down_read(&shm_ids(ns).rwsem);
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
                           struct shm_info *shm_info)
{
        int err = security_shm_shmctl(NULL, SHM_INFO);
        if (!err) {
                memset(shm_info, 0, sizeof(*shm_info));
                down_read(&shm_ids(ns).rwsem);
                shm_info->used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
                shm_info->shm_tot = ns->shm_tot;
                shm_info->swap_attempts = 0;
                shm_info->swap_successes = 0;
                err = ipc_get_maxidx(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (err < 0)
                        err = 0;
        }
        return err;
}

static int shmctl_stat(struct ipc_namespace *ns, int shmid,
                        int cmd, struct shmid64_ds *tbuf)
{
        struct shmid_kernel *shp;
        int err;

        memset(tbuf, 0, sizeof(*tbuf));

        rcu_read_lock();
        if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
                shp = shm_obtain_object(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        } else { /* IPC_STAT */
                shp = shm_obtain_object_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock;
                }
        }

        /*
         * Semantically SHM_STAT_ANY ought to be identical to
         * that functionality provided by the /proc/sysvipc/
         * interface. As such, only audit these calls and
         * do not do traditional S_IRUGO permission checks on
         * the ipc object.
         */
        if (cmd == SHM_STAT_ANY)
                audit_ipc_obj(&shp->shm_perm);
        else {
                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;
        }

        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
        tbuf->shm_segsz = shp->shm_segsz;
        tbuf->shm_atime = shp->shm_atim;
        tbuf->shm_dtime = shp->shm_dtim;
        tbuf->shm_ctime = shp->shm_ctim;
#ifndef CONFIG_64BIT
        tbuf->shm_atime_high = shp->shm_atim >> 32;
        tbuf->shm_dtime_high = shp->shm_dtim >> 32;
        tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
        tbuf->shm_cpid  = pid_vnr(shp->shm_cprid);
        tbuf->shm_lpid  = pid_vnr(shp->shm_lprid);
        tbuf->shm_nattch = shp->shm_nattch;

        if (cmd == IPC_STAT) {
                /*
                 * As defined in SUS:
                 * Return 0 on success
                 */
                err = 0;
        } else {
                /*
                 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
                 * Return the full id, including the sequence number
                 */
                err = shp->shm_perm.id;
        }

        ipc_unlock_object(&shp->shm_perm);
out_unlock:
        rcu_read_unlock();
        return err;
}

static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
        struct shmid_kernel *shp;
        struct file *shm_file;
        int err;

        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock1;
        }

        audit_ipc_obj(&(shp->shm_perm));
        err = security_shm_shmctl(&shp->shm_perm, cmd);
        if (err)
                goto out_unlock1;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                err = -EIDRM;
                goto out_unlock0;
        }

        if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                kuid_t euid = current_euid();

                if (!uid_eq(euid, shp->shm_perm.uid) &&
                    !uid_eq(euid, shp->shm_perm.cuid)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
                if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
                        err = -EPERM;
                        goto out_unlock0;
                }
        }

        shm_file = shp->shm_file;
        if (is_file_hugepages(shm_file))
                goto out_unlock0;

        if (cmd == SHM_LOCK) {
                struct ucounts *ucounts = current_ucounts();

                err = shmem_lock(shm_file, 1, ucounts);
                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                        shp->shm_perm.mode |= SHM_LOCKED;
                        shp->mlock_ucounts = ucounts;
                }
                goto out_unlock0;
        }

        /* SHM_UNLOCK */
        if (!(shp->shm_perm.mode & SHM_LOCKED))
                goto out_unlock0;
        shmem_lock(shm_file, 0, shp->mlock_ucounts);
        shp->shm_perm.mode &= ~SHM_LOCKED;
        shp->mlock_ucounts = NULL;
        get_file(shm_file);
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        shmem_unlock_mapping(shm_file->f_mapping);

        fput(shm_file);
        return err;

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
        return err;
}

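/*
 * Userspace view of SHM_LOCK/SHM_UNLOCK (illustrative sketch, not part
 * of the original source):
 *
 *      shmctl(id, SHM_LOCK, NULL);     <- pin the segment in memory
 *      shmctl(id, SHM_UNLOCK, NULL);   <- allow it to be swapped again
 *
 * Without CAP_IPC_LOCK the caller must be the segment's owner or
 * creator and, for SHM_LOCK, have a non-zero RLIMIT_MEMLOCK, matching
 * the checks above. Hugetlb-backed segments are silently skipped,
 * since hugetlb pages are not swapped out anyway.
 */
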
static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version)
{
        int err;
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
                        err = -EFAULT;
                return err;
        }
        case SHM_STAT:
        case SHM_STAT_ANY:
        case IPC_STAT: {
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_shmid_to_user(buf, &sem64, version))
                        err = -EFAULT;
                return err;
        }
        case IPC_SET:
                if (copy_shmid_from_user(&sem64, buf, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_shmctl(shmid, cmd, buf, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        int version = ipc_parse_version(&cmd);

        return ksys_shmctl(shmid, cmd, buf, version);
}

SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        return ksys_old_shmctl(shmid, cmd, buf);
}
#endif

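/*
 * Userspace view of the stat/rmid paths above (illustrative sketch, not
 * part of the original source):
 *
 *      struct shmid_ds ds;
 *
 *      if (shmctl(id, IPC_STAT, &ds) == 0)
 *              ...                     <- ds.shm_nattch, ds.shm_segsz, ...
 *      shmctl(id, IPC_RMID, NULL);     <- destroyed now if unattached,
 *                                         else marked SHM_DEST
 *
 * The deferred-destroy behaviour comes from do_shm_rmid(): a segment
 * with attachments is only hidden (its key made private) and flagged,
 * and the final shm_close()/exit path calls shm_destroy().
 */
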
#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
        struct compat_ipc_perm shm_perm;
        int shm_segsz;
        old_time32_t shm_atime;
        old_time32_t shm_dtime;
        old_time32_t shm_ctime;
        compat_ipc_pid_t shm_cpid;
        compat_ipc_pid_t shm_lpid;
        unsigned short shm_nattch;
        unsigned short shm_unused;
        compat_uptr_t shm_unused2;
        compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
        compat_ulong_t shmmax;
        compat_ulong_t shmmin;
        compat_ulong_t shmmni;
        compat_ulong_t shmseg;
        compat_ulong_t shmall;
        compat_ulong_t __unused1;
        compat_ulong_t __unused2;
        compat_ulong_t __unused3;
        compat_ulong_t __unused4;
};

struct compat_shm_info {
        compat_int_t used_ids;
        compat_ulong_t shm_tot, shm_rss, shm_swp;
        compat_ulong_t swap_attempts, swap_successes;
};

static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
                                        int version)
{
        if (in->shmmax > INT_MAX)
                in->shmmax = INT_MAX;
        if (version == IPC_64) {
                struct compat_shminfo64 info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        } else {
                struct shminfo info;
                memset(&info, 0, sizeof(info));
                info.shmmax = in->shmmax;
                info.shmmin = in->shmmin;
                info.shmmni = in->shmmni;
                info.shmseg = in->shmseg;
                info.shmall = in->shmall;
                return copy_to_user(buf, &info, sizeof(info));
        }
}

static int put_compat_shm_info(struct shm_info *ip,
                                struct compat_shm_info __user *uip)
{
        struct compat_shm_info info;

        memset(&info, 0, sizeof(info));
        info.used_ids = ip->used_ids;
        info.shm_tot = ip->shm_tot;
        info.shm_rss = ip->shm_rss;
        info.shm_swp = ip->shm_swp;
        info.swap_attempts = ip->swap_attempts;
        info.swap_successes = ip->swap_successes;
        return copy_to_user(uip, &info, sizeof(info));
}

static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
                                        int version)
{
        if (version == IPC_64) {
                struct compat_shmid64_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
                v.shm_atime      = lower_32_bits(in->shm_atime);
                v.shm_atime_high = upper_32_bits(in->shm_atime);
                v.shm_dtime      = lower_32_bits(in->shm_dtime);
                v.shm_dtime_high = upper_32_bits(in->shm_dtime);
                v.shm_ctime      = lower_32_bits(in->shm_ctime);
                v.shm_ctime_high = upper_32_bits(in->shm_ctime);
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        } else {
                struct compat_shmid_ds v;
                memset(&v, 0, sizeof(v));
                to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
                v.shm_perm.key = in->shm_perm.key;
                v.shm_atime = in->shm_atime;
                v.shm_dtime = in->shm_dtime;
                v.shm_ctime = in->shm_ctime;
                v.shm_segsz = in->shm_segsz;
                v.shm_nattch = in->shm_nattch;
                v.shm_cpid = in->shm_cpid;
                v.shm_lpid = in->shm_lpid;
                return copy_to_user(buf, &v, sizeof(v));
        }
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
                                        int version)
{
        memset(out, 0, sizeof(*out));
        if (version == IPC_64) {
                struct compat_shmid64_ds __user *p = buf;
                return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
        } else {
                struct compat_shmid_ds __user *p = buf;
                return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
        }
}

static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
        struct ipc_namespace *ns;
        struct shmid64_ds sem64;
        int err;

        ns = current->nsproxy->ipc_ns;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        switch (cmd) {
        case IPC_INFO: {
                struct shminfo64 shminfo;
                err = shmctl_ipc_info(ns, &shminfo);
                if (err < 0)
                        return err;
                if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
                        err = -EFAULT;
                return err;
        }
        case SHM_INFO: {
                struct shm_info shm_info;
                err = shmctl_shm_info(ns, &shm_info);
                if (err < 0)
                        return err;
                if (put_compat_shm_info(&shm_info, uptr))
                        err = -EFAULT;
                return err;
        }
        case IPC_STAT:
        case SHM_STAT_ANY:
        case SHM_STAT:
                err = shmctl_stat(ns, shmid, cmd, &sem64);
                if (err < 0)
                        return err;
                if (copy_compat_shmid_to_user(uptr, &sem64, version))
                        err = -EFAULT;
                return err;

        case IPC_SET:
                if (copy_compat_shmid_from_user(&sem64, uptr, version))
                        return -EFAULT;
                fallthrough;
        case IPC_RMID:
                return shmctl_down(ns, shmid, cmd, &sem64);
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
        default:
                return -EINVAL;
        }
        return err;
}

COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64);
}

#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr)
{
        int version = compat_ipc_parse_version(&cmd);

        return compat_ksys_shmctl(shmid, cmd, uptr, version);
}

COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr)
{
        return compat_ksys_old_shmctl(shmid, cmd, uptr);
}
#endif
#endif

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
              ulong *raddr, unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr = (unsigned long)shmaddr;
        unsigned long size;
        struct file *file, *base;
        int err;
        unsigned long flags = MAP_SHARED;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        int f_flags;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;

        if (addr) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND) {
                                addr &= ~(shmlba - 1);  /* round down */

                                /*
                                 * Ensure that the round-down is non-nil
                                 * when remapping. This can happen for
                                 * cases when addr < shmlba.
                                 */
                                if (!addr && (shmflg & SHM_REMAP))
                                        goto out;
                        } else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }

                flags |= MAP_FIXED;
        } else if ((shmflg & SHM_REMAP))
                goto out;

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_flags = O_RDONLY;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_flags = O_RDWR;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        /*
         * We need to take a reference to the real shm file to prevent the
         * pointer from becoming stale in cases where the lifetime of the outer
         * file extends beyond that of the shm segment. It's not usually
         * possible, but it can happen during remap_file_pages() emulation as
         * that unmaps the memory, then does ->mmap() via file reference only.
         * We'll deny the ->mmap() if the shm segment was since removed, but to
         * detect shm ID reuse we need to compare the file pointers.
         */
        base = get_file(shp->shm_file);
        shp->shm_nattch++;
        size = i_size_read(file_inode(base));
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                fput(base);
                goto out_nattch;
        }

        file = alloc_file_clone(base, f_flags,
                          is_file_hugepages(base) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
                fput(base);
                goto out_nattch;
        }

        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = base;
        sfd->vm_ops = NULL;
        file->private_data = sfd;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        if (mmap_write_lock_killable(current->mm)) {
                err = -EINTR;
                goto out_fput;
        }

        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (addr + size < addr)
                        goto invalid;

                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
        }

        addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        mmap_write_unlock(current->mm);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        shp->shm_nattch--;

        if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
        return err;

out_unlock:
        rcu_read_unlock();
out:
        return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA   SHMLBA
#endif

COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
#endif

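/*
 * Typical attach/detach round trip from userspace (illustrative sketch,
 * not part of the original source):
 *
 *      void *p = shmat(id, NULL, 0);   <- kernel picks the address
 *      if (p != (void *)-1) {
 *              memcpy(p, "hello", 6);
 *              shmdt(p);               <- handled by ksys_shmdt() below
 *      }
 *
 * shmat() returns the mapped address as the syscall return value, which
 * is why the wrappers above call force_successful_syscall_return():
 * large addresses must not be misinterpreted as -errno values.
 */
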
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct file *file;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size and that are from the
         *   same shm segment from which we determined the size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        /*
                         * Record the file of the shm segment being
                         * unmapped. With mremap(), someone could place
                         * page from another segment but with equal offsets
                         * in the range we are unmapping.
                         */
                        file = vma->vm_file;
                        size = i_size_read(file_inode(vma->vm_file));
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
                    (vma->vm_file == file))
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                vma = next;
        }

#else   /* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given
         */
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
                retval = 0;
        }

#endif

        mmap_write_unlock(mm);
        return retval;
}

SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        return ksys_shmdt(shmaddr);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
        struct user_namespace *user_ns = seq_user_ns(s);
        struct kern_ipc_perm *ipcp = it;
        struct shmid_kernel *shp;
        unsigned long rss = 0, swp = 0;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        seq_printf(s,
                   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
                   SIZE_SPEC " " SIZE_SPEC "\n",
                   shp->shm_perm.key,
                   shp->shm_perm.id,
                   shp->shm_perm.mode,
                   shp->shm_segsz,
                   pid_nr_ns(shp->shm_cprid, pid_ns),
                   pid_nr_ns(shp->shm_lprid, pid_ns),
                   shp->shm_nattch,
                   from_kuid_munged(user_ns, shp->shm_perm.uid),
                   from_kgid_munged(user_ns, shp->shm_perm.gid),
                   from_kuid_munged(user_ns, shp->shm_perm.cuid),
                   from_kgid_munged(user_ns, shp->shm_perm.cgid),
                   shp->shm_atim,
                   shp->shm_dtim,
                   shp->shm_ctim,
                   rss * PAGE_SIZE,
                   swp * PAGE_SIZE);

        return 0;
}
#endif