wait.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}
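
/*
 * Example: a minimal sketch of the two initialisation styles above. All
 * names here ('my_waitq', 'struct my_device', my_dev_init()) are
 * hypothetical, for illustration only:
 *
 *	// Static definition, initialised at compile time:
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
 *
 *	// Dynamic initialisation of an embedded head; also registers a
 *	// lockdep class key for the head's spinlock:
 *	struct my_device {
 *		struct wait_queue_head	waitq;
 *	};
 *
 *	static void my_dev_init(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->waitq);
 *	}
 */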
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
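
/*
 * Example: a sketch of the waker-side pattern the comments above describe.
 * The smp_mb() in wq_has_sleeper() pairs with the barrier the waiter
 * issues via set_current_state(), so the @cond store cannot be reordered
 * after the emptiness test. 'cond' and 'my_waitq' are hypothetical:
 *
 *	cond = true;			// publish the condition first
 *	if (wq_has_sleeper(&my_waitq))	// implied smp_mb() orders the store
 *		wake_up(&my_waitq);	// against the wait-list test
 */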
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
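
/*
 * Example: the classic open-coded wait loop built from the primitives
 * above; the wait_event*() family below packages these same steps. A
 * minimal sketch with hypothetical names ('my_waitq', 'data_ready'). Note
 * that the state is set *before* the condition is re-tested, so a wakeup
 * after the condition store cannot be lost:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&my_waitq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (data_ready)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_waitq, &wait);
 */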
/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))

/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
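
/*
 * Example: a sketch of how a custom wake function might use key_to_poll()
 * to filter wakeups by the poll events passed through wake_up_poll(). The
 * function name 'my_wake' and the EPOLLIN filter are illustrative
 * assumptions, not an API defined here:
 *
 *	static int my_wake(struct wait_queue_entry *wq_entry, unsigned mode,
 *			   int sync, void *key)
 *	{
 *		// Only react to readable events; ignore other wakeups.
 *		if (key && !(key_to_poll(key) & EPOLLIN))
 *			return 0;
 *		return autoremove_wake_function(wq_entry, mode, sync, key);
 *	}
 *
 * The waker side would then report the event with, e.g.:
 *
 *	wake_up_poll(&my_waitq, EPOLLIN);
 */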
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})

#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
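
/*
 * Example: a minimal sketch of the wait_event() / wake_up() pairing
 * described above ('my_waitq' and 'done' are hypothetical):
 *
 *	// Waiter: sleeps uninterruptibly until 'done' becomes true.
 *	wait_event(my_waitq, done);
 *
 *	// Waker: update the condition *before* waking.
 *	done = true;
 *	wake_up(&my_waitq);
 */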
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)

#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
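
/*
 * Example: a sketch of handling wait_event_timeout()'s return value;
 * 'my_waitq', 'io_done' and the 100ms budget are hypothetical:
 *
 *	long ret = wait_event_timeout(my_waitq, io_done,
 *				      msecs_to_jiffies(100));
 *	if (!ret)
 *		return -ETIMEDOUT;	// condition still false, timed out
 *	// ret >= 1: condition became true; 'ret' is the jiffies left.
 */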
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
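
/*
 * Example: a sketch of the usual wait_event_interruptible() call site in
 * a syscall path, propagating -ERESTARTSYS so the signal machinery can
 * restart the call ('my_waitq' and 'data_ready' are hypothetical):
 *
 *	int ret = wait_event_interruptible(my_waitq, data_ready);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: interrupted by a signal
 *	// data_ready was true; proceed.
 */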
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
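
/*
 * Example: a sketch of a high-resolution timeout wait; unlike the jiffies
 * variants, the timeout is a ktime_t and expiry is reported as -ETIME
 * ('my_waitq', 'fifo_ready' and the 5ms budget are hypothetical):
 *
 *	int ret = wait_event_hrtimeout(my_waitq, fifo_ready,
 *				       ms_to_ktime(5));
 *	if (ret == -ETIME)
 *		return -ETIMEDOUT;	// 5ms elapsed, condition still false
 */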
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)	\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
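
/*
 * Example: a sketch of the locked variant; the condition is tested under
 * wq.lock, and the waker holds the same lock and uses wake_up_locked().
 * 'my_waitq' and 'nr_items' are hypothetical:
 *
 *	// Waiter:
 *	spin_lock(&my_waitq.lock);
 *	ret = wait_event_interruptible_locked(my_waitq, nr_items > 0);
 *	if (!ret)
 *		nr_items--;		// consume under the same lock
 *	spin_unlock(&my_waitq.lock);
 *
 *	// Waker:
 *	spin_lock(&my_waitq.lock);
 *	nr_items++;
 *	wake_up_locked(&my_waitq);
 *	spin_unlock(&my_waitq.lock);
 */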
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
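
/*
 * Example: a sketch of wait_event_killable(); only fatal signals (e.g.
 * SIGKILL) interrupt the sleep, so the task does not react to ordinary
 * signals yet can still be killed ('my_waitq', 'resp_ready' are
 * hypothetical):
 *
 *	int ret = wait_event_killable(my_waitq, resp_ready);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: fatal signal delivered
 */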
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
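
/*
 * Example: a driver-style sketch of waiting for a free slot with a queue
 * lock held; the macro drops 'my_lock' around schedule() and reacquires
 * it before re-testing the condition ('my_waitq', 'my_lock' and
 * 'free_slots' are hypothetical):
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_waitq, free_slots > 0, my_lock);
 *	free_slots--;			// still under the lock here
 *	spin_unlock_irq(&my_lock);
 *
 * The waker takes 'my_lock', increments 'free_slots', and then calls
 * wake_up(&my_waitq).
 */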
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
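
/*
 * Example: a sketch of the wait_woken() pattern; DEFINE_WAIT_FUNC()
 * registers woken_wake_function so the wakeup is recorded in
 * WQ_FLAG_WOKEN and cannot be lost between the condition test and the
 * sleep. 'my_waitq' and 'data_ready' are hypothetical, and a real loop
 * would also check signal_pending() and a zero remaining timeout:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&my_waitq, &wait);
 *	while (!data_ready)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&my_waitq, &wait);
 */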
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)

typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);

#endif /* _LINUX_WAIT_H */