| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-29 18:53:37 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-29 18:53:37 -0800 |
| commit | 896f8d23d0cb5889021d66eab6107e97109c5459 | |
| tree | f8ceade3203209679ece27d3cace410178dfc99c /include/linux | |
| parent | 33c84e89abe4a92ab699c33029bd54269d574782 | |
| parent | 3e4827b05d2ac2d377ed136a52829ec46787bf4b | |
Merge tag 'for-5.6/io_uring-vfs-2020-01-29' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:
- Support for various new opcodes (fallocate, openat, close, statx,
fadvise, madvise, openat2, non-vectored read/write, send/recv, and
epoll_ctl)
- Faster ring quiesce for fileset updates
- Optimizations for overflow condition checking
- Support for max-sized clamping
- Support for probing which opcodes are supported (see the probe sketch
  after this list)
- Support for io-wq backend sharing between "sibling" rings
- Support for registering personalities
- Lots of little fixes and improvements
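
As a quick illustration of the probe feature mentioned above, here is a minimal userspace sketch. It assumes a liburing recent enough to provide io_uring_get_probe() and a kernel with IORING_REGISTER_PROBE (added by this series); the program itself is not part of this pull, it only shows how an application can ask the kernel which opcodes it supports before depending on one of the new ones.

```c
/*
 * Sketch only: assumes liburing with io_uring_get_probe() and a
 * kernel that implements IORING_REGISTER_PROBE (5.6+).
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	/* Allocates and fills a probe via IORING_REGISTER_PROBE. */
	struct io_uring_probe *probe = io_uring_get_probe();

	if (!probe) {
		fprintf(stderr, "io_uring probing not available\n");
		return 1;
	}

	/* True if the opcode is within ops_len and flagged supported. */
	if (io_uring_opcode_supported(probe, IORING_OP_EPOLL_CTL))
		puts("IORING_OP_EPOLL_CTL supported");
	else
		puts("IORING_OP_EPOLL_CTL not supported");

	io_uring_free_probe(probe);
	return 0;
}
```
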
* tag 'for-5.6/io_uring-vfs-2020-01-29' of git://git.kernel.dk/linux-block: (64 commits)
io_uring: add support for epoll_ctl(2)
eventpoll: support non-blocking do_epoll_ctl() calls
eventpoll: abstract out epoll_ctl() handler
io_uring: fix linked command file table usage
io_uring: support using a registered personality for commands
io_uring: allow registering credentials
io_uring: add io-wq workqueue sharing
io-wq: allow grabbing existing io-wq
io_uring/io-wq: don't use static creds/mm assignments
io-wq: make the io_wq ref counted
io_uring: fix refcounting with batched allocations at OOM
io_uring: add comment for drain_next
io_uring: don't attempt to copy iovec for READ/WRITE
io_uring: honor IOSQE_ASYNC for linked reqs
io_uring: prep req when do IOSQE_ASYNC
io_uring: use labeled array init in io_op_defs
io_uring: optimise sqe-to-req flags translation
io_uring: remove REQ_F_IO_DRAINED
io_uring: file switch work needs to get flushed on exit
io_uring: hide uring_fd in ctx
...
Diffstat (limited to 'include/linux')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/eventpoll.h | 9 |
| -rw-r--r-- | include/linux/mm.h | 1 |
| -rw-r--r-- | include/linux/percpu-refcount.h | 26 |

3 files changed, 31 insertions, 5 deletions
```diff
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index bc6d79b00c4e..8f000fada5a4 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,15 @@ static inline void eventpoll_release(struct file *file)
 	eventpoll_release_file(file);
 }
 
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+		 bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+	return op != EPOLL_CTL_DEL;
+}
+
 #else
 
 static inline void eventpoll_init_file(struct file *file) {}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 67f8451b9a12..1233bf45164d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2323,6 +2323,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
+extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
 static inline unsigned long
 do_mmap_pgoff(struct file *file, unsigned long addr,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 390031e816dc..22d9d183950d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -210,15 +210,17 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
+ * @nr: number of references to get
  *
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					  unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 	bool ret;
@@ -226,10 +228,10 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
+		ret = atomic_long_add_unless(&ref->count, nr, 0);
 	}
 
 	rcu_read_unlock();
@@ -238,6 +240,20 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	return percpu_ref_tryget_many(ref, 1);
+}
+
+/**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
  *
```
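
The percpu_ref_tryget_many() addition above is what lets a caller take one reference per request for a whole submission batch in a single operation instead of looping percpu_ref_tryget() (compare the "fix refcounting with batched allocations at OOM" entry in the shortlog). A minimal userspace sketch of the slow-path semantics using C11 atomics follows; the names here are illustrative, and the real kernel helper additionally has the RCU-protected per-CPU fast path shown in the hunk.

```c
/*
 * Illustrative userspace model of percpu_ref_tryget_many()'s atomic
 * slow path: take @nr references at once unless the count has already
 * hit zero. Not kernel code; the real helper also has a per-CPU fast
 * path guarded by rcu_read_lock().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long refcount = 1;	/* initial reference */

/* Mirrors atomic_long_add_unless(&ref->count, nr, 0). */
static bool ref_tryget_many(atomic_long *count, long nr)
{
	long old = atomic_load(count);

	do {
		if (old == 0)	/* already dropped to zero: fail */
			return false;
	} while (!atomic_compare_exchange_weak(count, &old, old + nr));

	return true;
}

int main(void)
{
	/* e.g. one ref per request in an 8-request submission batch */
	if (ref_tryget_many(&refcount, 8))
		printf("got 8 refs, count now %ld\n",
		       atomic_load(&refcount));
	return 0;
}
```
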
