dnotify.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Directory notifications for Linux.
 *
 * Copyright (C) 2000,2001,2002 Stephen Rothwell
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * dnotify was largely rewritten to use the new fsnotify infrastructure
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/dnotify.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>

static int dir_notify_enable __read_mostly = 1;
#ifdef CONFIG_SYSCTL
static struct ctl_table dnotify_sysctls[] = {
	{
		.procname = "dir-notify-enable",
		.data = &dir_notify_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{}
};
static void __init dnotify_sysctl_init(void)
{
	register_sysctl_init("fs", dnotify_sysctls);
}
#else
#define dnotify_sysctl_init() do { } while (0)
#endif

static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;

/*
 * dnotify will attach one of these to each inode (i_fsnotify_marks) which
 * is being watched by dnotify. If multiple userspace applications are watching
 * the same directory with dnotify, their information is chained in dn.
 */
struct dnotify_mark {
	struct fsnotify_mark fsn_mark;
	struct dnotify_struct *dn;
};

/*
 * When a process starts or stops watching an inode the set of events which
 * dnotify cares about for that inode may change. This function runs the
 * list of everything receiving dnotify events about this directory and
 * calculates the union of all those event masks. After it updates what
 * dnotify is interested in it calls the fsnotify function so it can update
 * the set of all events relevant to this inode.
 */
static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
	__u32 new_mask = 0;
	struct dnotify_struct *dn;
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	assert_spin_locked(&fsn_mark->lock);

	for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
		new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
	if (fsn_mark->mask == new_mask)
		return;
	fsn_mark->mask = new_mask;

	fsnotify_recalc_mask(fsn_mark->connector);
}
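/*
 * Editorial worked example (not in the upstream file): if one watcher of a
 * directory registered DN_CREATE | DN_MULTISHOT and another registered
 * DN_MODIFY, their dnotify_structs carry (see convert_arg() below)
 *
 *	FS_CREATE | FS_MOVED_TO | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT
 *	FS_MODIFY | FS_EVENT_ON_CHILD
 *
 * so the recalculated mark mask above becomes
 * FS_CREATE | FS_MOVED_TO | FS_MODIFY | FS_EVENT_ON_CHILD, i.e. the union of
 * both with FS_DN_MULTISHOT filtered out, since multishot only controls how
 * long an entry lives, not which events are interesting.
 */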
/*
 * Main fsnotify callback where events are delivered to dnotify.
 * Find the dnotify mark on the relevant inode, run the list of dnotify structs
 * on that mark and determine which of them have expressed interest in
 * receiving events of this type. For each match, send the registered process
 * its signal and destroy the dnotify struct unless it was registered to
 * receive multiple events.
 */
static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
				struct inode *inode, struct inode *dir,
				const struct qstr *name, u32 cookie)
{
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct fown_struct *fown;
	__u32 test_mask = mask & ~FS_EVENT_ON_CHILD;

	/* not a dir, dnotify doesn't care */
	if (!dir && !(mask & FS_ISDIR))
		return 0;

	dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);

	spin_lock(&inode_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_mask & test_mask) == 0) {
			prev = &dn->dn_next;
			continue;
		}
		fown = &dn->dn_filp->f_owner;
		send_sigio(fown, dn->dn_fd, POLL_MSG);
		if (dn->dn_mask & FS_DN_MULTISHOT)
			prev = &dn->dn_next;
		else {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(inode_mark);
		}
	}

	spin_unlock(&inode_mark->lock);

	return 0;
}

static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	BUG_ON(dn_mark->dn);

	kmem_cache_free(dnotify_mark_cache, dn_mark);
}

static const struct fsnotify_ops dnotify_fsnotify_ops = {
	.handle_inode_event = dnotify_handle_event,
	.free_mark = dnotify_free_mark,
};
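/*
 * Editorial note (not in the upstream file): because dnotify_handle_event()
 * above frees any entry that lacks FS_DN_MULTISHOT after its first matching
 * event, a watch taken without DN_MULTISHOT is one-shot and has to be
 * re-armed from userspace after every signal, roughly:
 *
 *	fcntl(dfd, F_NOTIFY, DN_MODIFY);	// fires once, then is removed
 *	// ... handle the signal ...
 *	fcntl(dfd, F_NOTIFY, DN_MODIFY);	// arm again
 *
 * where dfd is an open directory descriptor as in the userspace sketch
 * further down.
 */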
/*
 * Called every time a file is closed. Looks first for a dnotify mark on the
 * inode. If one is found, search the ->dn structures attached to that mark
 * for the one belonging to this process closing the file and remove that
 * dnotify_struct. If that was the last dnotify_struct also remove the
 * fsnotify_mark.
 */
void dnotify_flush(struct file *filp, fl_owner_t id)
{
	struct fsnotify_mark *fsn_mark;
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct inode *inode;
	bool free = false;

	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode))
		return;

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (!fsn_mark)
		return;
	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);

	fsnotify_group_lock(dnotify_group);

	spin_lock(&fsn_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(fsn_mark);
			break;
		}
		prev = &dn->dn_next;
	}

	spin_unlock(&fsn_mark->lock);

	/* nothing else could have found us thanks to the dnotify_group's
	   mark_mutex */
	if (dn_mark->dn == NULL) {
		fsnotify_detach_mark(fsn_mark);
		free = true;
	}

	fsnotify_group_unlock(dnotify_group);

	if (free)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
}

/* this conversion is done only at watch creation */
static __u32 convert_arg(unsigned long arg)
{
	__u32 new_mask = FS_EVENT_ON_CHILD;

	if (arg & DN_MULTISHOT)
		new_mask |= FS_DN_MULTISHOT;
	if (arg & DN_DELETE)
		new_mask |= (FS_DELETE | FS_MOVED_FROM);
	if (arg & DN_MODIFY)
		new_mask |= FS_MODIFY;
	if (arg & DN_ACCESS)
		new_mask |= FS_ACCESS;
	if (arg & DN_ATTRIB)
		new_mask |= FS_ATTRIB;
	if (arg & DN_RENAME)
		new_mask |= FS_RENAME;
	if (arg & DN_CREATE)
		new_mask |= (FS_CREATE | FS_MOVED_TO);

	return new_mask;
}

/*
 * If multiple processes watch the same inode with dnotify there is only one
 * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
 * onto that mark. This function either attaches the new dnotify_struct onto
 * that list, or ORs the new mask into an existing dnotify_struct.
 */
static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
		     fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
	struct dnotify_struct *odn;

	odn = dn_mark->dn;
	while (odn != NULL) {
		/* adding more events to an existing dnotify_struct? */
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= mask;
			return -EEXIST;
		}
		odn = odn->dn_next;
	}

	dn->dn_mask = mask;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	dn->dn_next = dn_mark->dn;
	dn_mark->dn = dn;

	return 0;
}
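/*
 * Editorial sketch (not part of the upstream file): a minimal userspace
 * program showing how a dnotify watch is armed; the fcntl(F_NOTIFY) call is
 * what ends up in fcntl_dirnotify() below. F_SETSIG switches delivery from
 * plain SIGIO to a queued realtime signal so the handler can read si_fd.
 * Assumes glibc with _GNU_SOURCE; error handling is kept minimal.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t event_fd = -1;

/* SA_SIGINFO handler: si_fd names the directory fd that generated the event */
static void dn_handler(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	event_fd = si->si_fd;
}

int main(int argc, char *argv[])
{
	struct sigaction sa;
	int dfd;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = dn_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN + 1, &sa, NULL);

	dfd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);
	if (dfd < 0) {
		perror("open");
		return 1;
	}

	/* deliver SIGRTMIN+1 instead of SIGIO so siginfo carries the fd */
	if (fcntl(dfd, F_SETSIG, SIGRTMIN + 1) < 0 ||
	    fcntl(dfd, F_NOTIFY,
		  DN_CREATE | DN_DELETE | DN_RENAME | DN_MULTISHOT) < 0) {
		perror("fcntl");
		return 1;
	}

	for (;;) {
		pause();
		if (event_fd != -1) {
			printf("change in directory watched via fd %d\n",
			       (int)event_fd);
			event_fd = -1;
		}
	}
}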
/*
 * When a process calls fcntl to attach a dnotify watch to a directory it ends
 * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
 * attached to the fsnotify_mark.
 */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_mark *new_dn_mark, *dn_mark;
	struct fsnotify_mark *new_fsn_mark, *fsn_mark;
	struct dnotify_struct *dn;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int destroy = 0, error = 0;
	__u32 mask;

	/* we use these to tell if we need to kfree */
	new_fsn_mark = NULL;
	dn = NULL;

	if (!dir_notify_enable) {
		error = -EINVAL;
		goto out_err;
	}

	/* a 0 mask means we are explicitly removing the watch */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		error = 0;
		goto out_err;
	}

	/* dnotify only works on directories */
	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode)) {
		error = -ENOTDIR;
		goto out_err;
	}

	/*
	 * convert the userspace DN_* "arg" to the internal FS_*
	 * defined in fsnotify
	 */
	mask = convert_arg(arg);

	error = security_path_notify(&filp->f_path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		goto out_err;

	/* expect most fcntl to add new rather than augment old */
	dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
	if (!dn) {
		error = -ENOMEM;
		goto out_err;
	}

	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
	new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
	if (!new_dn_mark) {
		error = -ENOMEM;
		goto out_err;
	}

	/* set up the new_fsn_mark and new_dn_mark */
	new_fsn_mark = &new_dn_mark->fsn_mark;
	fsnotify_init_mark(new_fsn_mark, dnotify_group);
	new_fsn_mark->mask = mask;
	new_dn_mark->dn = NULL;

	/* this is needed to prevent the fcntl/close race described below */
	fsnotify_group_lock(dnotify_group);

	/* add the new_fsn_mark or find an old one. */
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (fsn_mark) {
		dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
		spin_lock(&fsn_mark->lock);
	} else {
		error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0);
		if (error) {
			fsnotify_group_unlock(dnotify_group);
			goto out_err;
		}
		spin_lock(&new_fsn_mark->lock);
		fsn_mark = new_fsn_mark;
		dn_mark = new_dn_mark;
		/* we used new_fsn_mark, so don't free it */
		new_fsn_mark = NULL;
	}

	rcu_read_lock();
	f = lookup_fd_rcu(fd);
	rcu_read_unlock();

	/* if (f != filp) means that we lost a race and another task/thread
	 * actually closed the fd we are still playing with before we grabbed
	 * the dnotify_group's mark_mutex and fsn_mark->lock. Since closing the
	 * fd is the only time we clean up the marks we need to get our mark
	 * off the list. */
	if (f != filp) {
		/* if we added ourselves, shoot ourselves; it's possible that
		 * the flush actually did shoot this fsn_mark. That's fine too
		 * since multiple calls to destroy_mark are perfectly safe. If
		 * we found a dn_mark already attached to the inode, just sod
		 * off silently as the flush at close time dealt with it.
		 */
		if (dn_mark == new_dn_mark)
			destroy = 1;
		error = 0;
		goto out;
	}

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);

	error = attach_dn(dn, dn_mark, id, fd, filp, mask);
	/* !error means that we attached the dn to the dn_mark, so don't free it */
	if (!error)
		dn = NULL;
	/* -EEXIST means that we didn't add this new dn and used an old one.
	 * That isn't an error (and the unused dn should be freed) */
	else if (error == -EEXIST)
		error = 0;

	dnotify_recalc_inode_mask(fsn_mark);
out:
	spin_unlock(&fsn_mark->lock);

	if (destroy)
		fsnotify_detach_mark(fsn_mark);
	fsnotify_group_unlock(dnotify_group);
	if (destroy)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
out_err:
	if (new_fsn_mark)
		fsnotify_put_mark(new_fsn_mark);
	if (dn)
		kmem_cache_free(dnotify_struct_cache, dn);
	return error;
}
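/*
 * Editorial note (not in the upstream file): both ways a watch goes away
 * funnel into dnotify_flush() above. A process can drop its watch explicitly
 * with a zero mask, or implicitly by closing the directory:
 *
 *	fcntl(dfd, F_NOTIFY, 0);	// zero mask: remove this owner's watch
 *	close(dfd);			// filp_close() calls dnotify_flush()
 */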
static int __init dnotify_init(void)
{
	dnotify_struct_cache = KMEM_CACHE(dnotify_struct,
					  SLAB_PANIC|SLAB_ACCOUNT);
	dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT);

	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops,
					     FSNOTIFY_GROUP_NOFS);
	if (IS_ERR(dnotify_group))
		panic("unable to allocate fsnotify group for dnotify\n");
	dnotify_sysctl_init();
	return 0;
}

module_init(dnotify_init)
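/*
 * Editorial note (not in the upstream file): dnotify_sysctl_init() above
 * registers the fs.dir-notify-enable knob (/proc/sys/fs/dir-notify-enable).
 * Writing 0 to it makes new fcntl(fd, F_NOTIFY, ...) requests fail with
 * -EINVAL in fcntl_dirnotify(); watches that already exist keep delivering
 * events.
 */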