inotify_user.c (22245B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/*
 * An inotify watch requires allocating an inotify_inode_mark structure as
 * well as pinning the watched inode. Doubling the size of a VFS inode
 * should be more than enough to cover the additional filesystem inode
 * size increase.
 */
#define INOTIFY_WATCH_COST	(sizeof(struct inotify_inode_mark) + \
				 2 * sizeof(struct inode))

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long it_zero = 0;
static long it_int_max = INT_MAX;

static struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &it_zero,
		.extra2		= &it_int_max,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &it_zero,
		.extra2		= &it_int_max,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};

static void __init inotify_sysctls_init(void)
{
	register_sysctl("fs/inotify", inotify_table);
}

#else
#define inotify_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
{
	__u32 mask;

	/*
	 * Everything should receive events when the inode is unmounted.
	 * All directories care about children.
	 */
	mask = (FS_UNMOUNT);
	if (S_ISDIR(inode->i_mode))
		mask |= FS_EVENT_ON_CHILD;

	/* mask off the flags used to open the fd */
	mask |= (arg & INOTIFY_USER_MASK);

	return mask;
}

#define INOTIFY_MARK_FLAGS \
	(FSNOTIFY_MARK_FLAG_EXCL_UNLINK | FSNOTIFY_MARK_FLAG_IN_ONESHOT)

static inline unsigned int inotify_arg_to_flags(u32 arg)
{
	unsigned int flags = 0;

	if (arg & IN_EXCL_UNLINK)
		flags |= FSNOTIFY_MARK_FLAG_EXCL_UNLINK;
	if (arg & IN_ONESHOT)
		flags |= FSNOTIFY_MARK_FLAG_IN_ONESHOT;

	return flags;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	event = fsnotify_peek_first_event(group);
	if (!event)
		return NULL;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = path_permission(path, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL,
				   0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	int replace = !(arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (replace) {
		fsn_mark->mask = 0;
		fsn_mark->flags &= ~INOTIFY_MARK_FLAGS;
	}
	fsn_mark->mask |= inotify_arg_to_mask(inode, arg);
	fsn_mark->flags |= inotify_arg_to_flags(arg);
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = inotify_arg_to_mask(inode, arg);
	tmp_i_mark->fsn_mark.flags = inotify_arg_to_flags(arg);
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	fsnotify_group_lock(group);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	fsnotify_group_unlock(group);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops,
				     FSNOTIFY_GROUP_USER);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify. We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*. This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
				 (mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	unsigned long watches_max;
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allow up to 1% of addressable memory to be allocated for inotify
	 * watches (per user) limited to the range [8192, 1048576].
	 */
	watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
			INOTIFY_WATCH_COST;
	watches_max = clamp(watches_max, 8192UL, 1048576UL);

	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;
	inotify_sysctls_init();

	return 0;
}
fs_initcall(inotify_user_setup);
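
The syscalls implemented above are reached from applications through the libc wrappers declared in <sys/inotify.h>. The following is a minimal userspace sketch of that flow (the watched path "/tmp" and the chosen event mask are illustrative assumptions, not part of this file); each call maps onto a function above: inotify_init1() -> do_inotify_init(), inotify_add_watch() -> inotify_update_watch(), read() -> inotify_read(), inotify_rm_watch() -> the rm_watch syscall.

/* Illustrative userspace sketch; "/tmp" and the event mask are arbitrary. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t len;

	/* create an inotify instance (a new fsnotify group + anon fd) */
	int fd = inotify_init1(IN_CLOEXEC);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	/* add a watch; the returned wd is the idr slot allocated above */
	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE | IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	/* blocks in inotify_read() until at least one event is queued */
	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		/* the buffer holds one or more struct inotify_event records,
		 * each padded to a multiple of the struct size as done in
		 * copy_event_to_user() */
		for (char *p = buf; p < buf + len; ) {
			struct inotify_event *ev = (struct inotify_event *)p;

			printf("wd=%d mask=0x%x name=%s\n", ev->wd,
			       (unsigned)ev->mask, ev->len ? ev->name : "");
			p += sizeof(struct inotify_event) + ev->len;
		}
	}

	/* removing the watch queues a final IN_IGNORED event for this wd */
	inotify_rm_watch(fd, wd);
	close(fd);
	return 0;
}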