cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

timer.c (62999B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *  Kernel internal timers
      4 *
      5 *  Copyright (C) 1991, 1992  Linus Torvalds
      6 *
      7 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
      8 *
      9 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
     10 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
     11 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
     12 *              serialize accesses to xtime/lost_ticks).
     13 *                              Copyright (C) 1998  Andrea Arcangeli
     14 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
     15 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
     16 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
     17 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
     18 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
     19 */
     20
     21#include <linux/kernel_stat.h>
     22#include <linux/export.h>
     23#include <linux/interrupt.h>
     24#include <linux/percpu.h>
     25#include <linux/init.h>
     26#include <linux/mm.h>
     27#include <linux/swap.h>
     28#include <linux/pid_namespace.h>
     29#include <linux/notifier.h>
     30#include <linux/thread_info.h>
     31#include <linux/time.h>
     32#include <linux/jiffies.h>
     33#include <linux/posix-timers.h>
     34#include <linux/cpu.h>
     35#include <linux/syscalls.h>
     36#include <linux/delay.h>
     37#include <linux/tick.h>
     38#include <linux/kallsyms.h>
     39#include <linux/irq_work.h>
     40#include <linux/sched/signal.h>
     41#include <linux/sched/sysctl.h>
     42#include <linux/sched/nohz.h>
     43#include <linux/sched/debug.h>
     44#include <linux/slab.h>
     45#include <linux/compat.h>
     46#include <linux/random.h>
     47#include <linux/sysctl.h>
     48
     49#include <linux/uaccess.h>
     50#include <asm/unistd.h>
     51#include <asm/div64.h>
     52#include <asm/timex.h>
     53#include <asm/io.h>
     54
     55#include "tick-internal.h"
     56
     57#define CREATE_TRACE_POINTS
     58#include <trace/events/timer.h>
     59
     60__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
     61
     62EXPORT_SYMBOL(jiffies_64);
     63
     64/*
     65 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
      66 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
     67 * level has a different granularity.
     68 *
     69 * The level granularity is:		LVL_CLK_DIV ^ lvl
     70 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
     71 *
      72 * The array level of a newly armed timer depends on the relative expiry
      73 * time. The farther away the expiry time is, the higher the array level,
      74 * and therefore the coarser the granularity becomes.
     75 *
     76 * Contrary to the original timer wheel implementation, which aims for 'exact'
     77 * expiry of the timers, this implementation removes the need for recascading
     78 * the timers into the lower array levels. The previous 'classic' timer wheel
     79 * implementation of the kernel already violated the 'exact' expiry by adding
     80 * slack to the expiry time to provide batched expiration. The granularity
     81 * levels provide implicit batching.
     82 *
     83 * This is an optimization of the original timer wheel implementation for the
     84 * majority of the timer wheel use cases: timeouts. The vast majority of
     85 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
     86 * the timeout expires it indicates that normal operation is disturbed, so it
     87 * does not matter much whether the timeout comes with a slight delay.
     88 *
      89 * The only exceptions to this are networking timers with a small expiry
     90 * time. They rely on the granularity. Those fit into the first wheel level,
     91 * which has HZ granularity.
     92 *
      93 * We don't have cascading anymore. Timers with an expiry time above the
      94 * capacity of the last wheel level are force-expired at the maximum timeout
     95 * value of the last wheel level. From data sampling we know that the maximum
     96 * value observed is 5 days (network connection tracking), so this should not
     97 * be an issue.
     98 *
      99 * The currently chosen array constants are a good compromise between
    100 * array size and granularity.
    101 *
    102 * This results in the following granularity and range levels:
    103 *
    104 * HZ 1000 steps
    105 * Level Offset  Granularity            Range
    106 *  0      0         1 ms                0 ms -         63 ms
    107 *  1     64         8 ms               64 ms -        511 ms
    108 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
    109 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
    110 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
    111 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
    112 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
    113 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
    114 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
    115 *
    116 * HZ  300
    117 * Level Offset  Granularity            Range
    118 *  0	   0         3 ms                0 ms -        210 ms
    119 *  1	  64        26 ms              213 ms -       1703 ms (213ms - ~1s)
    120 *  2	 128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
    121 *  3	 192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
    122 *  4	 256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
    123 *  5	 320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
    124 *  6	 384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
    125 *  7	 448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
    126 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
    127 *
    128 * HZ  250
    129 * Level Offset  Granularity            Range
    130 *  0	   0         4 ms                0 ms -        255 ms
    131 *  1	  64        32 ms              256 ms -       2047 ms (256ms - ~2s)
    132 *  2	 128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
    133 *  3	 192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
    134 *  4	 256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
    135 *  5	 320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
    136 *  6	 384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
    137 *  7	 448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
    138 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
    139 *
    140 * HZ  100
    141 * Level Offset  Granularity            Range
    142 *  0	   0         10 ms               0 ms -        630 ms
    143 *  1	  64         80 ms             640 ms -       5110 ms (640ms - ~5s)
    144 *  2	 128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
    145 *  3	 192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
    146 *  4	 256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
    147 *  5	 320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
    148 *  6	 384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
    149 *  7	 448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
    150 */
    151
    152/* Clock divisor for the next level */
    153#define LVL_CLK_SHIFT	3
    154#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
    155#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
    156#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
    157#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
    158
    159/*
    160 * The time start value for each level to select the bucket at enqueue
    161 * time. We start from the last possible delta of the previous level
    162 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
    163 */
    164#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
    165
    166/* Size of each clock level */
    167#define LVL_BITS	6
    168#define LVL_SIZE	(1UL << LVL_BITS)
    169#define LVL_MASK	(LVL_SIZE - 1)
    170#define LVL_OFFS(n)	((n) * LVL_SIZE)
    171
    172/* Level depth */
    173#if HZ > 100
    174# define LVL_DEPTH	9
     175#else
    176# define LVL_DEPTH	8
    177#endif
    178
    179/* The cutoff (max. capacity of the wheel) */
    180#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
    181#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
    182
    183/*
    184 * The resulting wheel size. If NOHZ is configured we allocate two
     185 * wheels so we have separate storage for the deferrable timers.
    186 */
    187#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)
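
/*
 * Illustration (not part of this file's build): a minimal userspace
 * sketch using the same macro arithmetic as above to reproduce the
 * HZ=1000 table columns. Note that the last level is additionally
 * capped at WHEEL_TIMEOUT_MAX, i.e. WHEEL_TIMEOUT_CUTOFF minus one
 * granularity unit of the last level.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* Assumes HZ == 1000, so one tick is 1 ms */
	for (unsigned long n = 0; n < 9; n++) {
		unsigned long gran = 1UL << (n * 3);	/* LVL_GRAN(n) */

		printf("level %lu: offset %3lu, granularity %8lu ms, covers up to %10lu ms\n",
		       n, n * 64, gran, 64 * gran - 1);
	}
	return 0;
}
#endif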
    188
    189#ifdef CONFIG_NO_HZ_COMMON
    190# define NR_BASES	2
    191# define BASE_STD	0
    192# define BASE_DEF	1
    193#else
    194# define NR_BASES	1
    195# define BASE_STD	0
    196# define BASE_DEF	0
    197#endif
    198
    199struct timer_base {
    200	raw_spinlock_t		lock;
    201	struct timer_list	*running_timer;
    202#ifdef CONFIG_PREEMPT_RT
    203	spinlock_t		expiry_lock;
    204	atomic_t		timer_waiters;
    205#endif
    206	unsigned long		clk;
    207	unsigned long		next_expiry;
    208	unsigned int		cpu;
    209	bool			next_expiry_recalc;
    210	bool			is_idle;
    211	bool			timers_pending;
    212	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
    213	struct hlist_head	vectors[WHEEL_SIZE];
    214} ____cacheline_aligned;
    215
    216static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
    217
    218#ifdef CONFIG_NO_HZ_COMMON
    219
    220static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
    221static DEFINE_MUTEX(timer_keys_mutex);
    222
    223static void timer_update_keys(struct work_struct *work);
    224static DECLARE_WORK(timer_update_work, timer_update_keys);
    225
    226#ifdef CONFIG_SMP
    227static unsigned int sysctl_timer_migration = 1;
    228
    229DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
    230
    231static void timers_update_migration(void)
    232{
    233	if (sysctl_timer_migration && tick_nohz_active)
    234		static_branch_enable(&timers_migration_enabled);
    235	else
    236		static_branch_disable(&timers_migration_enabled);
    237}
    238
    239#ifdef CONFIG_SYSCTL
    240static int timer_migration_handler(struct ctl_table *table, int write,
    241			    void *buffer, size_t *lenp, loff_t *ppos)
    242{
    243	int ret;
    244
    245	mutex_lock(&timer_keys_mutex);
    246	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
    247	if (!ret && write)
    248		timers_update_migration();
    249	mutex_unlock(&timer_keys_mutex);
    250	return ret;
    251}
    252
    253static struct ctl_table timer_sysctl[] = {
    254	{
    255		.procname	= "timer_migration",
    256		.data		= &sysctl_timer_migration,
    257		.maxlen		= sizeof(unsigned int),
    258		.mode		= 0644,
    259		.proc_handler	= timer_migration_handler,
    260		.extra1		= SYSCTL_ZERO,
    261		.extra2		= SYSCTL_ONE,
    262	},
    263	{}
    264};
    265
    266static int __init timer_sysctl_init(void)
    267{
    268	register_sysctl("kernel", timer_sysctl);
    269	return 0;
    270}
    271device_initcall(timer_sysctl_init);
    272#endif /* CONFIG_SYSCTL */
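
/*
 * The knob registered above appears as /proc/sys/kernel/timer_migration.
 * Writing 0 (e.g. "echo 0 > /proc/sys/kernel/timer_migration") keeps
 * non-pinned timers on the CPU that armed them instead of migrating
 * them to a busy CPU via get_nohz_timer_target().
 */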
    273#else /* CONFIG_SMP */
    274static inline void timers_update_migration(void) { }
    275#endif /* !CONFIG_SMP */
    276
    277static void timer_update_keys(struct work_struct *work)
    278{
    279	mutex_lock(&timer_keys_mutex);
    280	timers_update_migration();
    281	static_branch_enable(&timers_nohz_active);
    282	mutex_unlock(&timer_keys_mutex);
    283}
    284
    285void timers_update_nohz(void)
    286{
    287	schedule_work(&timer_update_work);
    288}
    289
    290static inline bool is_timers_nohz_active(void)
    291{
    292	return static_branch_unlikely(&timers_nohz_active);
    293}
    294#else
    295static inline bool is_timers_nohz_active(void) { return false; }
    296#endif /* NO_HZ_COMMON */
    297
    298static unsigned long round_jiffies_common(unsigned long j, int cpu,
    299		bool force_up)
    300{
    301	int rem;
    302	unsigned long original = j;
    303
    304	/*
    305	 * We don't want all cpus firing their timers at once hitting the
    306	 * same lock or cachelines, so we skew each extra cpu with an extra
     307	 * 3 jiffies. These 3 jiffies originally came from the mm/ code,
     308	 * which already did this.
     309	 * The skew is done by adding 3*cpunr, rounding, and then subtracting
     310	 * this extra offset again.
    311	 */
    312	j += cpu * 3;
    313
    314	rem = j % HZ;
    315
    316	/*
     317	 * If the target jiffy is just after a whole second (which can happen
     318	 * due to delays of the timer irq, long irq-off times etc.) then we
     319	 * should round down to the whole second, not up. Use 1/4th second
     320	 * as the cutoff for this rounding, as an extreme upper bound.
    321	 * But never round down if @force_up is set.
    322	 */
    323	if (rem < HZ/4 && !force_up) /* round down */
    324		j = j - rem;
    325	else /* round up */
    326		j = j - rem + HZ;
    327
    328	/* now that we have rounded, subtract the extra skew again */
    329	j -= cpu * 3;
    330
    331	/*
    332	 * Make sure j is still in the future. Otherwise return the
    333	 * unmodified value.
    334	 */
    335	return time_is_after_jiffies(j) ? j : original;
    336}
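
/*
 * Worked example (HZ=1000, cpu=1): for j = 5100, j += 3 gives 5103 and
 * rem = 103 < HZ/4, so we round down to 5000 and return 4997 once the
 * skew is removed. For j = 5400, rem = 403 >= HZ/4, so we round up to
 * 6000 and return 5997. Each CPU thus gets its own skewed "second"
 * boundary, three jiffies apart from its neighbours.
 */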
    337
    338/**
    339 * __round_jiffies - function to round jiffies to a full second
    340 * @j: the time in (absolute) jiffies that should be rounded
    341 * @cpu: the processor number on which the timeout will happen
    342 *
    343 * __round_jiffies() rounds an absolute time in the future (in jiffies)
    344 * up or down to (approximately) full seconds. This is useful for timers
    345 * for which the exact time they fire does not matter too much, as long as
    346 * they fire approximately every X seconds.
    347 *
    348 * By rounding these timers to whole seconds, all such timers will fire
    349 * at the same time, rather than at various times spread out. The goal
    350 * of this is to have the CPU wake up less, which saves power.
    351 *
    352 * The exact rounding is skewed for each processor to avoid all
    353 * processors firing at the exact same time, which could lead
    354 * to lock contention or spurious cache line bouncing.
    355 *
    356 * The return value is the rounded version of the @j parameter.
    357 */
    358unsigned long __round_jiffies(unsigned long j, int cpu)
    359{
    360	return round_jiffies_common(j, cpu, false);
    361}
    362EXPORT_SYMBOL_GPL(__round_jiffies);
    363
    364/**
    365 * __round_jiffies_relative - function to round jiffies to a full second
    366 * @j: the time in (relative) jiffies that should be rounded
    367 * @cpu: the processor number on which the timeout will happen
    368 *
     369 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
    370 * up or down to (approximately) full seconds. This is useful for timers
    371 * for which the exact time they fire does not matter too much, as long as
    372 * they fire approximately every X seconds.
    373 *
    374 * By rounding these timers to whole seconds, all such timers will fire
    375 * at the same time, rather than at various times spread out. The goal
    376 * of this is to have the CPU wake up less, which saves power.
    377 *
    378 * The exact rounding is skewed for each processor to avoid all
    379 * processors firing at the exact same time, which could lead
    380 * to lock contention or spurious cache line bouncing.
    381 *
    382 * The return value is the rounded version of the @j parameter.
    383 */
    384unsigned long __round_jiffies_relative(unsigned long j, int cpu)
    385{
    386	unsigned long j0 = jiffies;
    387
    388	/* Use j0 because jiffies might change while we run */
    389	return round_jiffies_common(j + j0, cpu, false) - j0;
    390}
    391EXPORT_SYMBOL_GPL(__round_jiffies_relative);
    392
    393/**
    394 * round_jiffies - function to round jiffies to a full second
    395 * @j: the time in (absolute) jiffies that should be rounded
    396 *
    397 * round_jiffies() rounds an absolute time in the future (in jiffies)
    398 * up or down to (approximately) full seconds. This is useful for timers
    399 * for which the exact time they fire does not matter too much, as long as
    400 * they fire approximately every X seconds.
    401 *
    402 * By rounding these timers to whole seconds, all such timers will fire
    403 * at the same time, rather than at various times spread out. The goal
    404 * of this is to have the CPU wake up less, which saves power.
    405 *
    406 * The return value is the rounded version of the @j parameter.
    407 */
    408unsigned long round_jiffies(unsigned long j)
    409{
    410	return round_jiffies_common(j, raw_smp_processor_id(), false);
    411}
    412EXPORT_SYMBOL_GPL(round_jiffies);
    413
    414/**
    415 * round_jiffies_relative - function to round jiffies to a full second
    416 * @j: the time in (relative) jiffies that should be rounded
    417 *
     418 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
    419 * up or down to (approximately) full seconds. This is useful for timers
    420 * for which the exact time they fire does not matter too much, as long as
    421 * they fire approximately every X seconds.
    422 *
    423 * By rounding these timers to whole seconds, all such timers will fire
    424 * at the same time, rather than at various times spread out. The goal
    425 * of this is to have the CPU wake up less, which saves power.
    426 *
    427 * The return value is the rounded version of the @j parameter.
    428 */
    429unsigned long round_jiffies_relative(unsigned long j)
    430{
    431	return __round_jiffies_relative(j, raw_smp_processor_id());
    432}
    433EXPORT_SYMBOL_GPL(round_jiffies_relative);
    434
    435/**
    436 * __round_jiffies_up - function to round jiffies up to a full second
    437 * @j: the time in (absolute) jiffies that should be rounded
    438 * @cpu: the processor number on which the timeout will happen
    439 *
    440 * This is the same as __round_jiffies() except that it will never
    441 * round down.  This is useful for timeouts for which the exact time
    442 * of firing does not matter too much, as long as they don't fire too
    443 * early.
    444 */
    445unsigned long __round_jiffies_up(unsigned long j, int cpu)
    446{
    447	return round_jiffies_common(j, cpu, true);
    448}
    449EXPORT_SYMBOL_GPL(__round_jiffies_up);
    450
    451/**
    452 * __round_jiffies_up_relative - function to round jiffies up to a full second
    453 * @j: the time in (relative) jiffies that should be rounded
    454 * @cpu: the processor number on which the timeout will happen
    455 *
    456 * This is the same as __round_jiffies_relative() except that it will never
    457 * round down.  This is useful for timeouts for which the exact time
    458 * of firing does not matter too much, as long as they don't fire too
    459 * early.
    460 */
    461unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
    462{
    463	unsigned long j0 = jiffies;
    464
    465	/* Use j0 because jiffies might change while we run */
    466	return round_jiffies_common(j + j0, cpu, true) - j0;
    467}
    468EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
    469
    470/**
    471 * round_jiffies_up - function to round jiffies up to a full second
    472 * @j: the time in (absolute) jiffies that should be rounded
    473 *
    474 * This is the same as round_jiffies() except that it will never
    475 * round down.  This is useful for timeouts for which the exact time
    476 * of firing does not matter too much, as long as they don't fire too
    477 * early.
    478 */
    479unsigned long round_jiffies_up(unsigned long j)
    480{
    481	return round_jiffies_common(j, raw_smp_processor_id(), true);
    482}
    483EXPORT_SYMBOL_GPL(round_jiffies_up);
    484
    485/**
    486 * round_jiffies_up_relative - function to round jiffies up to a full second
    487 * @j: the time in (relative) jiffies that should be rounded
    488 *
    489 * This is the same as round_jiffies_relative() except that it will never
    490 * round down.  This is useful for timeouts for which the exact time
    491 * of firing does not matter too much, as long as they don't fire too
    492 * early.
    493 */
    494unsigned long round_jiffies_up_relative(unsigned long j)
    495{
    496	return __round_jiffies_up_relative(j, raw_smp_processor_id());
    497}
    498EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
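
/*
 * Usage sketch (illustrative; do_housekeeping() is a stand-in): a
 * periodic timer that only needs ~1s resolution can batch its wakeups
 * with other second-aligned timers:
 *
 *	static void housekeeping_fn(struct timer_list *t)
 *	{
 *		do_housekeeping();
 *		mod_timer(t, round_jiffies(jiffies + 10 * HZ));
 *	}
 *
 * Use the _up variants when the timeout must never fire early.
 */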
    499
    500
    501static inline unsigned int timer_get_idx(struct timer_list *timer)
    502{
    503	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
    504}
    505
    506static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
    507{
    508	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
    509			idx << TIMER_ARRAYSHIFT;
    510}
    511
    512/*
    513 * Helper function to calculate the array index for a given expiry
    514 * time.
    515 */
    516static inline unsigned calc_index(unsigned long expires, unsigned lvl,
    517				  unsigned long *bucket_expiry)
    518{
    519
    520	/*
    521	 * The timer wheel has to guarantee that a timer does not fire
    522	 * early. Early expiry can happen due to:
    523	 * - Timer is armed at the edge of a tick
    524	 * - Truncation of the expiry time in the outer wheel levels
    525	 *
    526	 * Round up with level granularity to prevent this.
    527	 */
    528	expires = (expires >> LVL_SHIFT(lvl)) + 1;
    529	*bucket_expiry = expires << LVL_SHIFT(lvl);
    530	return LVL_OFFS(lvl) + (expires & LVL_MASK);
    531}
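
/*
 * Worked example: expires = 300, lvl = 2 (LVL_SHIFT(2) == 6):
 * (300 >> 6) + 1 = 5, so *bucket_expiry = 5 << 6 = 320 and the index
 * is LVL_OFFS(2) + (5 & LVL_MASK) = 128 + 5 = 133. The timer is thus
 * rounded up by the level-2 granularity of 64 ticks and cannot fire
 * early.
 */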
    532
    533static int calc_wheel_index(unsigned long expires, unsigned long clk,
    534			    unsigned long *bucket_expiry)
    535{
    536	unsigned long delta = expires - clk;
    537	unsigned int idx;
    538
    539	if (delta < LVL_START(1)) {
    540		idx = calc_index(expires, 0, bucket_expiry);
    541	} else if (delta < LVL_START(2)) {
    542		idx = calc_index(expires, 1, bucket_expiry);
    543	} else if (delta < LVL_START(3)) {
    544		idx = calc_index(expires, 2, bucket_expiry);
    545	} else if (delta < LVL_START(4)) {
    546		idx = calc_index(expires, 3, bucket_expiry);
    547	} else if (delta < LVL_START(5)) {
    548		idx = calc_index(expires, 4, bucket_expiry);
    549	} else if (delta < LVL_START(6)) {
    550		idx = calc_index(expires, 5, bucket_expiry);
    551	} else if (delta < LVL_START(7)) {
    552		idx = calc_index(expires, 6, bucket_expiry);
    553	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
    554		idx = calc_index(expires, 7, bucket_expiry);
    555	} else if ((long) delta < 0) {
    556		idx = clk & LVL_MASK;
    557		*bucket_expiry = clk;
    558	} else {
    559		/*
     560		 * Force obscenely large timeouts to expire at the
     561		 * capacity limit of the wheel.
    562		 */
    563		if (delta >= WHEEL_TIMEOUT_CUTOFF)
    564			expires = clk + WHEEL_TIMEOUT_MAX;
    565
    566		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
    567	}
    568	return idx;
    569}
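
/*
 * Worked example: with LVL_START(1) == 63 and LVL_START(2) == 504, a
 * delta of 40 ticks selects level 0 and a delta of 100 ticks selects
 * level 1. A negative delta (an already expired timer) is queued into
 * the bucket currently being processed (clk & LVL_MASK), so it expires
 * on the next tick.
 */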
    570
    571static void
    572trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
    573{
    574	if (!is_timers_nohz_active())
    575		return;
    576
    577	/*
    578	 * TODO: This wants some optimizing similar to the code below, but we
    579	 * will do that when we switch from push to pull for deferrable timers.
    580	 */
    581	if (timer->flags & TIMER_DEFERRABLE) {
    582		if (tick_nohz_full_cpu(base->cpu))
    583			wake_up_nohz_cpu(base->cpu);
    584		return;
    585	}
    586
    587	/*
    588	 * We might have to IPI the remote CPU if the base is idle and the
    589	 * timer is not deferrable. If the other CPU is on the way to idle
    590	 * then it can't set base->is_idle as we hold the base lock:
    591	 */
    592	if (base->is_idle)
    593		wake_up_nohz_cpu(base->cpu);
    594}
    595
    596/*
    597 * Enqueue the timer into the hash bucket, mark it pending in
     598 * the bitmap, store the index in the timer flags, then wake up
    599 * the target CPU if needed.
    600 */
    601static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
    602			  unsigned int idx, unsigned long bucket_expiry)
    603{
    604
    605	hlist_add_head(&timer->entry, base->vectors + idx);
    606	__set_bit(idx, base->pending_map);
    607	timer_set_idx(timer, idx);
    608
    609	trace_timer_start(timer, timer->expires, timer->flags);
    610
    611	/*
    612	 * Check whether this is the new first expiring timer. The
    613	 * effective expiry time of the timer is required here
    614	 * (bucket_expiry) instead of timer->expires.
    615	 */
    616	if (time_before(bucket_expiry, base->next_expiry)) {
    617		/*
    618		 * Set the next expiry time and kick the CPU so it
    619		 * can reevaluate the wheel:
    620		 */
    621		base->next_expiry = bucket_expiry;
    622		base->timers_pending = true;
    623		base->next_expiry_recalc = false;
    624		trigger_dyntick_cpu(base, timer);
    625	}
    626}
    627
    628static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
    629{
    630	unsigned long bucket_expiry;
    631	unsigned int idx;
    632
    633	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
    634	enqueue_timer(base, timer, idx, bucket_expiry);
    635}
    636
    637#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
    638
    639static const struct debug_obj_descr timer_debug_descr;
    640
    641struct timer_hint {
    642	void	(*function)(struct timer_list *t);
    643	long	offset;
    644};
    645
    646#define TIMER_HINT(fn, container, timr, hintfn)			\
    647	{							\
    648		.function = fn,					\
    649		.offset	  = offsetof(container, hintfn) -	\
    650			    offsetof(container, timr)		\
    651	}
    652
    653static const struct timer_hint timer_hints[] = {
    654	TIMER_HINT(delayed_work_timer_fn,
    655		   struct delayed_work, timer, work.func),
    656	TIMER_HINT(kthread_delayed_work_timer_fn,
    657		   struct kthread_delayed_work, timer, work.func),
    658};
    659
    660static void *timer_debug_hint(void *addr)
    661{
    662	struct timer_list *timer = addr;
    663	int i;
    664
    665	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
    666		if (timer_hints[i].function == timer->function) {
    667			void (**fn)(void) = addr + timer_hints[i].offset;
    668
    669			return *fn;
    670		}
    671	}
    672
    673	return timer->function;
    674}
    675
    676static bool timer_is_static_object(void *addr)
    677{
    678	struct timer_list *timer = addr;
    679
    680	return (timer->entry.pprev == NULL &&
    681		timer->entry.next == TIMER_ENTRY_STATIC);
    682}
    683
    684/*
    685 * fixup_init is called when:
    686 * - an active object is initialized
    687 */
    688static bool timer_fixup_init(void *addr, enum debug_obj_state state)
    689{
    690	struct timer_list *timer = addr;
    691
    692	switch (state) {
    693	case ODEBUG_STATE_ACTIVE:
    694		del_timer_sync(timer);
    695		debug_object_init(timer, &timer_debug_descr);
    696		return true;
    697	default:
    698		return false;
    699	}
    700}
    701
    702/* Stub timer callback for improperly used timers. */
    703static void stub_timer(struct timer_list *unused)
    704{
    705	WARN_ON(1);
    706}
    707
    708/*
    709 * fixup_activate is called when:
    710 * - an active object is activated
    711 * - an unknown non-static object is activated
    712 */
    713static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
    714{
    715	struct timer_list *timer = addr;
    716
    717	switch (state) {
    718	case ODEBUG_STATE_NOTAVAILABLE:
    719		timer_setup(timer, stub_timer, 0);
    720		return true;
    721
    722	case ODEBUG_STATE_ACTIVE:
    723		WARN_ON(1);
    724		fallthrough;
    725	default:
    726		return false;
    727	}
    728}
    729
    730/*
    731 * fixup_free is called when:
    732 * - an active object is freed
    733 */
    734static bool timer_fixup_free(void *addr, enum debug_obj_state state)
    735{
    736	struct timer_list *timer = addr;
    737
    738	switch (state) {
    739	case ODEBUG_STATE_ACTIVE:
    740		del_timer_sync(timer);
    741		debug_object_free(timer, &timer_debug_descr);
    742		return true;
    743	default:
    744		return false;
    745	}
    746}
    747
    748/*
    749 * fixup_assert_init is called when:
    750 * - an untracked/uninit-ed object is found
    751 */
    752static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
    753{
    754	struct timer_list *timer = addr;
    755
    756	switch (state) {
    757	case ODEBUG_STATE_NOTAVAILABLE:
    758		timer_setup(timer, stub_timer, 0);
    759		return true;
    760	default:
    761		return false;
    762	}
    763}
    764
    765static const struct debug_obj_descr timer_debug_descr = {
    766	.name			= "timer_list",
    767	.debug_hint		= timer_debug_hint,
    768	.is_static_object	= timer_is_static_object,
    769	.fixup_init		= timer_fixup_init,
    770	.fixup_activate		= timer_fixup_activate,
    771	.fixup_free		= timer_fixup_free,
    772	.fixup_assert_init	= timer_fixup_assert_init,
    773};
    774
    775static inline void debug_timer_init(struct timer_list *timer)
    776{
    777	debug_object_init(timer, &timer_debug_descr);
    778}
    779
    780static inline void debug_timer_activate(struct timer_list *timer)
    781{
    782	debug_object_activate(timer, &timer_debug_descr);
    783}
    784
    785static inline void debug_timer_deactivate(struct timer_list *timer)
    786{
    787	debug_object_deactivate(timer, &timer_debug_descr);
    788}
    789
    790static inline void debug_timer_assert_init(struct timer_list *timer)
    791{
    792	debug_object_assert_init(timer, &timer_debug_descr);
    793}
    794
    795static void do_init_timer(struct timer_list *timer,
    796			  void (*func)(struct timer_list *),
    797			  unsigned int flags,
    798			  const char *name, struct lock_class_key *key);
    799
    800void init_timer_on_stack_key(struct timer_list *timer,
    801			     void (*func)(struct timer_list *),
    802			     unsigned int flags,
    803			     const char *name, struct lock_class_key *key)
    804{
    805	debug_object_init_on_stack(timer, &timer_debug_descr);
    806	do_init_timer(timer, func, flags, name, key);
    807}
    808EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
    809
    810void destroy_timer_on_stack(struct timer_list *timer)
    811{
    812	debug_object_free(timer, &timer_debug_descr);
    813}
    814EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
    815
    816#else
    817static inline void debug_timer_init(struct timer_list *timer) { }
    818static inline void debug_timer_activate(struct timer_list *timer) { }
    819static inline void debug_timer_deactivate(struct timer_list *timer) { }
    820static inline void debug_timer_assert_init(struct timer_list *timer) { }
    821#endif
    822
    823static inline void debug_init(struct timer_list *timer)
    824{
    825	debug_timer_init(timer);
    826	trace_timer_init(timer);
    827}
    828
    829static inline void debug_deactivate(struct timer_list *timer)
    830{
    831	debug_timer_deactivate(timer);
    832	trace_timer_cancel(timer);
    833}
    834
    835static inline void debug_assert_init(struct timer_list *timer)
    836{
    837	debug_timer_assert_init(timer);
    838}
    839
    840static void do_init_timer(struct timer_list *timer,
    841			  void (*func)(struct timer_list *),
    842			  unsigned int flags,
    843			  const char *name, struct lock_class_key *key)
    844{
    845	timer->entry.pprev = NULL;
    846	timer->function = func;
    847	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
    848		flags &= TIMER_INIT_FLAGS;
    849	timer->flags = flags | raw_smp_processor_id();
    850	lockdep_init_map(&timer->lockdep_map, name, key, 0);
    851}
    852
    853/**
    854 * init_timer_key - initialize a timer
    855 * @timer: the timer to be initialized
    856 * @func: timer callback function
    857 * @flags: timer flags
    858 * @name: name of the timer
    859 * @key: lockdep class key of the fake lock used for tracking timer
    860 *       sync lock dependencies
    861 *
     862 * init_timer_key() must be done to a timer prior to calling *any* of the
    863 * other timer functions.
    864 */
    865void init_timer_key(struct timer_list *timer,
    866		    void (*func)(struct timer_list *), unsigned int flags,
    867		    const char *name, struct lock_class_key *key)
    868{
    869	debug_init(timer);
    870	do_init_timer(timer, func, flags, name, key);
    871}
    872EXPORT_SYMBOL(init_timer_key);
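
/*
 * Usage sketch (illustrative; my_dev, poll_hw and poll_hardware() are
 * stand-in names): drivers normally reach init_timer_key() through the
 * timer_setup() macro and recover their containing object in the
 * callback with from_timer():
 *
 *	struct my_dev {
 *		struct timer_list poll_timer;
 *	};
 *
 *	static void poll_hw(struct timer_list *t)
 *	{
 *		struct my_dev *dev = from_timer(dev, t, poll_timer);
 *
 *		poll_hardware(dev);
 *		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
 *	}
 *
 *	timer_setup(&dev->poll_timer, poll_hw, 0);
 *	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
 */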
    873
    874static inline void detach_timer(struct timer_list *timer, bool clear_pending)
    875{
    876	struct hlist_node *entry = &timer->entry;
    877
    878	debug_deactivate(timer);
    879
    880	__hlist_del(entry);
    881	if (clear_pending)
    882		entry->pprev = NULL;
    883	entry->next = LIST_POISON2;
    884}
    885
    886static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
    887			     bool clear_pending)
    888{
    889	unsigned idx = timer_get_idx(timer);
    890
    891	if (!timer_pending(timer))
    892		return 0;
    893
    894	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
    895		__clear_bit(idx, base->pending_map);
    896		base->next_expiry_recalc = true;
    897	}
    898
    899	detach_timer(timer, clear_pending);
    900	return 1;
    901}
    902
    903static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
    904{
    905	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
    906
    907	/*
    908	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
    909	 * to use the deferrable base.
    910	 */
    911	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
    912		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
    913	return base;
    914}
    915
    916static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
    917{
    918	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
    919
    920	/*
    921	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
    922	 * to use the deferrable base.
    923	 */
    924	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
    925		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
    926	return base;
    927}
    928
    929static inline struct timer_base *get_timer_base(u32 tflags)
    930{
    931	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
    932}
    933
    934static inline struct timer_base *
    935get_target_base(struct timer_base *base, unsigned tflags)
    936{
    937#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
    938	if (static_branch_likely(&timers_migration_enabled) &&
    939	    !(tflags & TIMER_PINNED))
    940		return get_timer_cpu_base(tflags, get_nohz_timer_target());
    941#endif
    942	return get_timer_this_cpu_base(tflags);
    943}
    944
    945static inline void forward_timer_base(struct timer_base *base)
    946{
    947	unsigned long jnow = READ_ONCE(jiffies);
    948
    949	/*
    950	 * No need to forward if we are close enough below jiffies.
     951	 * Also, while executing timers, base->clk is kept 1 tick ahead
     952	 * of jiffies to avoid endless requeuing to the current jiffy.
    953	 */
    954	if ((long)(jnow - base->clk) < 1)
    955		return;
    956
    957	/*
    958	 * If the next expiry value is > jiffies, then we fast forward to
    959	 * jiffies otherwise we forward to the next expiry value.
    960	 */
    961	if (time_after(base->next_expiry, jnow)) {
    962		base->clk = jnow;
    963	} else {
    964		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
    965			return;
    966		base->clk = base->next_expiry;
    967	}
    968}
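
/*
 * Worked example: jnow = 1000, base->clk = 900. If base->next_expiry
 * is 1100 (still in the future), clk is fast-forwarded to 1000. If it
 * is 950, clk is only forwarded to 950 so the pending expiry is not
 * stepped over. With base->clk already at 1000, nothing is done.
 */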
    969
    970
    971/*
    972 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
    973 * that all timers which are tied to this base are locked, and the base itself
    974 * is locked too.
    975 *
    976 * So __run_timers/migrate_timers can safely modify all timers which could
    977 * be found in the base->vectors array.
    978 *
    979 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
    980 * to wait until the migration is done.
    981 */
    982static struct timer_base *lock_timer_base(struct timer_list *timer,
    983					  unsigned long *flags)
    984	__acquires(timer->base->lock)
    985{
    986	for (;;) {
    987		struct timer_base *base;
    988		u32 tf;
    989
    990		/*
    991		 * We need to use READ_ONCE() here, otherwise the compiler
    992		 * might re-read @tf between the check for TIMER_MIGRATING
    993		 * and spin_lock().
    994		 */
    995		tf = READ_ONCE(timer->flags);
    996
    997		if (!(tf & TIMER_MIGRATING)) {
    998			base = get_timer_base(tf);
    999			raw_spin_lock_irqsave(&base->lock, *flags);
   1000			if (timer->flags == tf)
   1001				return base;
   1002			raw_spin_unlock_irqrestore(&base->lock, *flags);
   1003		}
   1004		cpu_relax();
   1005	}
   1006}
   1007
   1008#define MOD_TIMER_PENDING_ONLY		0x01
   1009#define MOD_TIMER_REDUCE		0x02
   1010#define MOD_TIMER_NOTPENDING		0x04
   1011
   1012static inline int
   1013__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
   1014{
   1015	unsigned long clk = 0, flags, bucket_expiry;
   1016	struct timer_base *base, *new_base;
   1017	unsigned int idx = UINT_MAX;
   1018	int ret = 0;
   1019
   1020	BUG_ON(!timer->function);
   1021
   1022	/*
   1023	 * This is a common optimization triggered by the networking code - if
   1024	 * the timer is re-modified to have the same timeout or ends up in the
   1025	 * same array bucket then just return:
   1026	 */
   1027	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
   1028		/*
   1029		 * The downside of this optimization is that it can result in
   1030		 * larger granularity than you would get from adding a new
   1031		 * timer with this expiry.
   1032		 */
   1033		long diff = timer->expires - expires;
   1034
   1035		if (!diff)
   1036			return 1;
   1037		if (options & MOD_TIMER_REDUCE && diff <= 0)
   1038			return 1;
   1039
   1040		/*
   1041		 * We lock timer base and calculate the bucket index right
   1042		 * here. If the timer ends up in the same bucket, then we
   1043		 * just update the expiry time and avoid the whole
   1044		 * dequeue/enqueue dance.
   1045		 */
   1046		base = lock_timer_base(timer, &flags);
   1047		forward_timer_base(base);
   1048
   1049		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
   1050		    time_before_eq(timer->expires, expires)) {
   1051			ret = 1;
   1052			goto out_unlock;
   1053		}
   1054
   1055		clk = base->clk;
   1056		idx = calc_wheel_index(expires, clk, &bucket_expiry);
   1057
   1058		/*
   1059		 * Retrieve and compare the array index of the pending
    1060		 * timer. If it matches, set the expiry to the new value so a
   1061		 * subsequent call will exit in the expires check above.
   1062		 */
   1063		if (idx == timer_get_idx(timer)) {
   1064			if (!(options & MOD_TIMER_REDUCE))
   1065				timer->expires = expires;
   1066			else if (time_after(timer->expires, expires))
   1067				timer->expires = expires;
   1068			ret = 1;
   1069			goto out_unlock;
   1070		}
   1071	} else {
   1072		base = lock_timer_base(timer, &flags);
   1073		forward_timer_base(base);
   1074	}
   1075
   1076	ret = detach_if_pending(timer, base, false);
   1077	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
   1078		goto out_unlock;
   1079
   1080	new_base = get_target_base(base, timer->flags);
   1081
   1082	if (base != new_base) {
   1083		/*
   1084		 * We are trying to schedule the timer on the new base.
    1085		 * However we can't change the timer's base while it is running,
    1086		 * otherwise del_timer_sync() can't detect that the timer's
    1087		 * handler has not yet finished. This also guarantees that the
   1088		 * timer is serialized wrt itself.
   1089		 */
   1090		if (likely(base->running_timer != timer)) {
   1091			/* See the comment in lock_timer_base() */
   1092			timer->flags |= TIMER_MIGRATING;
   1093
   1094			raw_spin_unlock(&base->lock);
   1095			base = new_base;
   1096			raw_spin_lock(&base->lock);
   1097			WRITE_ONCE(timer->flags,
   1098				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
   1099			forward_timer_base(base);
   1100		}
   1101	}
   1102
   1103	debug_timer_activate(timer);
   1104
   1105	timer->expires = expires;
   1106	/*
   1107	 * If 'idx' was calculated above and the base time did not advance
   1108	 * between calculating 'idx' and possibly switching the base, only
   1109	 * enqueue_timer() is required. Otherwise we need to (re)calculate
   1110	 * the wheel index via internal_add_timer().
   1111	 */
   1112	if (idx != UINT_MAX && clk == base->clk)
   1113		enqueue_timer(base, timer, idx, bucket_expiry);
   1114	else
   1115		internal_add_timer(base, timer);
   1116
   1117out_unlock:
   1118	raw_spin_unlock_irqrestore(&base->lock, flags);
   1119
   1120	return ret;
   1121}
   1122
   1123/**
   1124 * mod_timer_pending - modify a pending timer's timeout
   1125 * @timer: the pending timer to be modified
   1126 * @expires: new timeout in jiffies
   1127 *
   1128 * mod_timer_pending() is the same for pending timers as mod_timer(),
   1129 * but will not re-activate and modify already deleted timers.
   1130 *
   1131 * It is useful for unserialized use of timers.
   1132 */
   1133int mod_timer_pending(struct timer_list *timer, unsigned long expires)
   1134{
   1135	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
   1136}
   1137EXPORT_SYMBOL(mod_timer_pending);
   1138
   1139/**
   1140 * mod_timer - modify a timer's timeout
   1141 * @timer: the timer to be modified
   1142 * @expires: new timeout in jiffies
   1143 *
   1144 * mod_timer() is a more efficient way to update the expire field of an
    1145 * active timer (if the timer is inactive it will be activated).
   1146 *
   1147 * mod_timer(timer, expires) is equivalent to:
   1148 *
   1149 *     del_timer(timer); timer->expires = expires; add_timer(timer);
   1150 *
   1151 * Note that if there are multiple unserialized concurrent users of the
   1152 * same timer, then mod_timer() is the only safe way to modify the timeout,
   1153 * since add_timer() cannot modify an already running timer.
   1154 *
   1155 * The function returns whether it has modified a pending timer or not.
    1156 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
   1157 * active timer returns 1.)
   1158 */
   1159int mod_timer(struct timer_list *timer, unsigned long expires)
   1160{
   1161	return __mod_timer(timer, expires, 0);
   1162}
   1163EXPORT_SYMBOL(mod_timer);
   1164
   1165/**
   1166 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
   1167 * @timer:	The timer to be modified
   1168 * @expires:	New timeout in jiffies
   1169 *
    1170 * timer_reduce() is very similar to mod_timer(), except that it will only
    1171 * modify an enqueued timer if that would reduce the expiration time (it
    1172 * will still start a timer that isn't enqueued).
   1173 */
   1174int timer_reduce(struct timer_list *timer, unsigned long expires)
   1175{
   1176	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
   1177}
   1178EXPORT_SYMBOL(timer_reduce);
   1179
   1180/**
   1181 * add_timer - start a timer
   1182 * @timer: the timer to be added
   1183 *
   1184 * The kernel will do a ->function(@timer) callback from the
   1185 * timer interrupt at the ->expires point in the future. The
   1186 * current time is 'jiffies'.
   1187 *
    1188 * The timer's ->expires, ->function fields must be set prior to calling this
   1189 * function.
   1190 *
   1191 * Timers with an ->expires field in the past will be executed in the next
   1192 * timer tick.
   1193 */
   1194void add_timer(struct timer_list *timer)
   1195{
   1196	BUG_ON(timer_pending(timer));
   1197	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
   1198}
   1199EXPORT_SYMBOL(add_timer);
   1200
   1201/**
   1202 * add_timer_on - start a timer on a particular CPU
   1203 * @timer: the timer to be added
   1204 * @cpu: the CPU to start it on
   1205 *
   1206 * This is not very scalable on SMP. Double adds are not possible.
   1207 */
   1208void add_timer_on(struct timer_list *timer, int cpu)
   1209{
   1210	struct timer_base *new_base, *base;
   1211	unsigned long flags;
   1212
   1213	BUG_ON(timer_pending(timer) || !timer->function);
   1214
   1215	new_base = get_timer_cpu_base(timer->flags, cpu);
   1216
   1217	/*
   1218	 * If @timer was on a different CPU, it should be migrated with the
    1219	 * old base locked to prevent other operations from proceeding with the
   1220	 * wrong base locked.  See lock_timer_base().
   1221	 */
   1222	base = lock_timer_base(timer, &flags);
   1223	if (base != new_base) {
   1224		timer->flags |= TIMER_MIGRATING;
   1225
   1226		raw_spin_unlock(&base->lock);
   1227		base = new_base;
   1228		raw_spin_lock(&base->lock);
   1229		WRITE_ONCE(timer->flags,
   1230			   (timer->flags & ~TIMER_BASEMASK) | cpu);
   1231	}
   1232	forward_timer_base(base);
   1233
   1234	debug_timer_activate(timer);
   1235	internal_add_timer(base, timer);
   1236	raw_spin_unlock_irqrestore(&base->lock, flags);
   1237}
   1238EXPORT_SYMBOL_GPL(add_timer_on);
   1239
   1240/**
   1241 * del_timer - deactivate a timer.
   1242 * @timer: the timer to be deactivated
   1243 *
   1244 * del_timer() deactivates a timer - this works on both active and inactive
   1245 * timers.
   1246 *
   1247 * The function returns whether it has deactivated a pending timer or not.
    1248 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
   1249 * active timer returns 1.)
   1250 */
   1251int del_timer(struct timer_list *timer)
   1252{
   1253	struct timer_base *base;
   1254	unsigned long flags;
   1255	int ret = 0;
   1256
   1257	debug_assert_init(timer);
   1258
   1259	if (timer_pending(timer)) {
   1260		base = lock_timer_base(timer, &flags);
   1261		ret = detach_if_pending(timer, base, true);
   1262		raw_spin_unlock_irqrestore(&base->lock, flags);
   1263	}
   1264
   1265	return ret;
   1266}
   1267EXPORT_SYMBOL(del_timer);
   1268
   1269/**
   1270 * try_to_del_timer_sync - Try to deactivate a timer
   1271 * @timer: timer to delete
   1272 *
   1273 * This function tries to deactivate a timer. Upon successful (ret >= 0)
   1274 * exit the timer is not queued and the handler is not running on any CPU.
   1275 */
   1276int try_to_del_timer_sync(struct timer_list *timer)
   1277{
   1278	struct timer_base *base;
   1279	unsigned long flags;
   1280	int ret = -1;
   1281
   1282	debug_assert_init(timer);
   1283
   1284	base = lock_timer_base(timer, &flags);
   1285
   1286	if (base->running_timer != timer)
   1287		ret = detach_if_pending(timer, base, true);
   1288
   1289	raw_spin_unlock_irqrestore(&base->lock, flags);
   1290
   1291	return ret;
   1292}
   1293EXPORT_SYMBOL(try_to_del_timer_sync);
   1294
   1295#ifdef CONFIG_PREEMPT_RT
   1296static __init void timer_base_init_expiry_lock(struct timer_base *base)
   1297{
   1298	spin_lock_init(&base->expiry_lock);
   1299}
   1300
   1301static inline void timer_base_lock_expiry(struct timer_base *base)
   1302{
   1303	spin_lock(&base->expiry_lock);
   1304}
   1305
   1306static inline void timer_base_unlock_expiry(struct timer_base *base)
   1307{
   1308	spin_unlock(&base->expiry_lock);
   1309}
   1310
   1311/*
   1312 * The counterpart to del_timer_wait_running().
   1313 *
   1314 * If there is a waiter for base->expiry_lock, then it was waiting for the
   1315 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
   1316 * the waiter to acquire the lock and make progress.
   1317 */
   1318static void timer_sync_wait_running(struct timer_base *base)
   1319{
   1320	if (atomic_read(&base->timer_waiters)) {
   1321		raw_spin_unlock_irq(&base->lock);
   1322		spin_unlock(&base->expiry_lock);
   1323		spin_lock(&base->expiry_lock);
   1324		raw_spin_lock_irq(&base->lock);
   1325	}
   1326}
   1327
   1328/*
   1329 * This function is called on PREEMPT_RT kernels when the fast path
   1330 * deletion of a timer failed because the timer callback function was
   1331 * running.
   1332 *
    1333 * This prevents priority inversion if the softirq thread on a remote CPU
    1334 * got preempted, and it prevents a livelock when the task which tries to
   1335 * delete a timer preempted the softirq thread running the timer callback
   1336 * function.
   1337 */
   1338static void del_timer_wait_running(struct timer_list *timer)
   1339{
   1340	u32 tf;
   1341
   1342	tf = READ_ONCE(timer->flags);
   1343	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
   1344		struct timer_base *base = get_timer_base(tf);
   1345
   1346		/*
   1347		 * Mark the base as contended and grab the expiry lock,
   1348		 * which is held by the softirq across the timer
   1349		 * callback. Drop the lock immediately so the softirq can
   1350		 * expire the next timer. In theory the timer could already
    1351		 * be running again, but that's highly unlikely and just
   1352		 * causes another wait loop.
   1353		 */
   1354		atomic_inc(&base->timer_waiters);
   1355		spin_lock_bh(&base->expiry_lock);
   1356		atomic_dec(&base->timer_waiters);
   1357		spin_unlock_bh(&base->expiry_lock);
   1358	}
   1359}
   1360#else
   1361static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
   1362static inline void timer_base_lock_expiry(struct timer_base *base) { }
   1363static inline void timer_base_unlock_expiry(struct timer_base *base) { }
   1364static inline void timer_sync_wait_running(struct timer_base *base) { }
   1365static inline void del_timer_wait_running(struct timer_list *timer) { }
   1366#endif
   1367
   1368#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
   1369/**
   1370 * del_timer_sync - deactivate a timer and wait for the handler to finish.
   1371 * @timer: the timer to be deactivated
   1372 *
   1373 * This function only differs from del_timer() on SMP: besides deactivating
   1374 * the timer it also makes sure the handler has finished executing on other
   1375 * CPUs.
   1376 *
   1377 * Synchronization rules: Callers must prevent restarting of the timer,
   1378 * otherwise this function is meaningless. It must not be called from
   1379 * interrupt contexts unless the timer is an irqsafe one. The caller must
   1380 * not hold locks which would prevent completion of the timer's
   1381 * handler. The timer's handler must not call add_timer_on(). Upon exit the
   1382 * timer is not queued and the handler is not running on any CPU.
   1383 *
   1384 * Note: For !irqsafe timers, you must not hold locks that are held in
   1385 *   interrupt context while calling this function. Even if the lock has
   1386 *   nothing to do with the timer in question.  Here's why::
   1387 *
   1388 *    CPU0                             CPU1
   1389 *    ----                             ----
   1390 *                                     <SOFTIRQ>
   1391 *                                       call_timer_fn();
   1392 *                                       base->running_timer = mytimer;
   1393 *    spin_lock_irq(somelock);
   1394 *                                     <IRQ>
   1395 *                                        spin_lock(somelock);
   1396 *    del_timer_sync(mytimer);
   1397 *    while (base->running_timer == mytimer);
   1398 *
   1399 * Now del_timer_sync() will never return and never release somelock.
   1400 * The interrupt on the other CPU is waiting to grab somelock but
   1401 * it has interrupted the softirq that CPU0 is waiting to finish.
   1402 *
   1403 * The function returns whether it has deactivated a pending timer or not.
   1404 */
   1405int del_timer_sync(struct timer_list *timer)
   1406{
   1407	int ret;
   1408
   1409#ifdef CONFIG_LOCKDEP
   1410	unsigned long flags;
   1411
   1412	/*
   1413	 * If lockdep gives a backtrace here, please reference
   1414	 * the synchronization rules above.
   1415	 */
   1416	local_irq_save(flags);
   1417	lock_map_acquire(&timer->lockdep_map);
   1418	lock_map_release(&timer->lockdep_map);
   1419	local_irq_restore(flags);
   1420#endif
   1421	/*
    1422	 * Don't use it in hardirq context, because it
   1423	 * could lead to deadlock.
   1424	 */
   1425	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
   1426
   1427	/*
   1428	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
   1429	 * del_timer_wait_running().
   1430	 */
   1431	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
   1432		lockdep_assert_preemption_enabled();
   1433
   1434	do {
   1435		ret = try_to_del_timer_sync(timer);
   1436
   1437		if (unlikely(ret < 0)) {
   1438			del_timer_wait_running(timer);
   1439			cpu_relax();
   1440		}
   1441	} while (ret < 0);
   1442
   1443	return ret;
   1444}
   1445EXPORT_SYMBOL(del_timer_sync);
   1446#endif
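
/*
 * Teardown sketch (illustrative; the names are stand-ins): per the
 * synchronization rules above, prevent the timer from being re-armed
 * before the final del_timer_sync(), e.g. with a flag the callback
 * checks before calling mod_timer() again:
 *
 *	WRITE_ONCE(dev->shutting_down, true);
 *	del_timer_sync(&dev->poll_timer);
 *	free_poll_resources(dev);
 */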
   1447
   1448static void call_timer_fn(struct timer_list *timer,
   1449			  void (*fn)(struct timer_list *),
   1450			  unsigned long baseclk)
   1451{
   1452	int count = preempt_count();
   1453
   1454#ifdef CONFIG_LOCKDEP
   1455	/*
    1456	 * It is permissible to free the timer from inside the
    1457	 * function that is called from it; we need to take this into
    1458	 * account for lockdep too. To avoid bogus "held lock freed"
   1459	 * warnings as well as problems when looking into
   1460	 * timer->lockdep_map, make a copy and use that here.
   1461	 */
   1462	struct lockdep_map lockdep_map;
   1463
   1464	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
   1465#endif
   1466	/*
   1467	 * Couple the lock chain with the lock chain at
   1468	 * del_timer_sync() by acquiring the lock_map around the fn()
   1469	 * call here and in del_timer_sync().
   1470	 */
   1471	lock_map_acquire(&lockdep_map);
   1472
   1473	trace_timer_expire_entry(timer, baseclk);
   1474	fn(timer);
   1475	trace_timer_expire_exit(timer);
   1476
   1477	lock_map_release(&lockdep_map);
   1478
   1479	if (count != preempt_count()) {
   1480		WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
   1481			  fn, count, preempt_count());
   1482		/*
   1483		 * Restore the preempt count. That gives us a decent
   1484		 * chance to survive and extract information. If the
   1485		 * callback kept a lock held, bad luck, but not worse
   1486		 * than the BUG() we had.
   1487		 */
   1488		preempt_count_set(count);
   1489	}
   1490}
   1491
   1492static void expire_timers(struct timer_base *base, struct hlist_head *head)
   1493{
   1494	/*
   1495	 * This value is required only for tracing. base->clk was
   1496	 * incremented directly before expire_timers was called. But expiry
   1497	 * is related to the old base->clk value.
   1498	 */
   1499	unsigned long baseclk = base->clk - 1;
   1500
   1501	while (!hlist_empty(head)) {
   1502		struct timer_list *timer;
   1503		void (*fn)(struct timer_list *);
   1504
   1505		timer = hlist_entry(head->first, struct timer_list, entry);
   1506
   1507		base->running_timer = timer;
   1508		detach_timer(timer, true);
   1509
   1510		fn = timer->function;
   1511
   1512		if (timer->flags & TIMER_IRQSAFE) {
   1513			raw_spin_unlock(&base->lock);
   1514			call_timer_fn(timer, fn, baseclk);
   1515			raw_spin_lock(&base->lock);
   1516			base->running_timer = NULL;
   1517		} else {
   1518			raw_spin_unlock_irq(&base->lock);
   1519			call_timer_fn(timer, fn, baseclk);
   1520			raw_spin_lock_irq(&base->lock);
   1521			base->running_timer = NULL;
   1522			timer_sync_wait_running(base);
   1523		}
   1524	}
   1525}
   1526
   1527static int collect_expired_timers(struct timer_base *base,
   1528				  struct hlist_head *heads)
   1529{
   1530	unsigned long clk = base->clk = base->next_expiry;
   1531	struct hlist_head *vec;
   1532	int i, levels = 0;
   1533	unsigned int idx;
   1534
   1535	for (i = 0; i < LVL_DEPTH; i++) {
   1536		idx = (clk & LVL_MASK) + i * LVL_SIZE;
   1537
   1538		if (__test_and_clear_bit(idx, base->pending_map)) {
   1539			vec = base->vectors + idx;
   1540			hlist_move_list(vec, heads++);
   1541			levels++;
   1542		}
   1543		/* Is it time to look at the next level? */
   1544		if (clk & LVL_CLK_MASK)
   1545			break;
   1546		/* Shift clock for the next level granularity */
   1547		clk >>= LVL_CLK_SHIFT;
   1548	}
   1549	return levels;
   1550}
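
        /*
         * Worked example (illustrative, using LVL_BITS = 6, i.e.
         * LVL_SIZE = 64 and LVL_MASK = 63, and LVL_CLK_SHIFT = 3 as
         * defined earlier in this file): for base->next_expiry == 0x1238,
         * level 0 checks bucket (0x1238 & 63) == 56. Since
         * 0x1238 & LVL_CLK_MASK == 0 the scan continues with
         * clk >>= 3 == 0x247, so level 1 checks bucket
         * 64 + (0x247 & 63) == 71. As 0x247 & LVL_CLK_MASK != 0, the
         * scan stops after level 1.
         */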
   1551
   1552/*
   1553 * Find the next pending bucket of a level. Search from level start (@offset)
    1554 * + @clk upwards, and if nothing is there, search from the start of the
    1555 * level (@offset) up to @offset + @clk.
   1556 */
   1557static int next_pending_bucket(struct timer_base *base, unsigned offset,
   1558			       unsigned clk)
   1559{
   1560	unsigned pos, start = offset + clk;
   1561	unsigned end = offset + LVL_SIZE;
   1562
   1563	pos = find_next_bit(base->pending_map, end, start);
   1564	if (pos < end)
   1565		return pos - start;
   1566
   1567	pos = find_next_bit(base->pending_map, start, offset);
   1568	return pos < start ? pos + LVL_SIZE - start : -1;
   1569}
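
        /*
         * Illustrative example (LVL_SIZE = 64): with @clk == 60 and the
         * level's only pending bit at bucket @offset + 2, the first search
         * over [@offset + 60, @offset + 64) finds nothing, and the wrapped
         * search from @offset returns 2 + 64 - 60 == 6, i.e. the bucket
         * expires six level-granularity units from now.
         */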
   1570
   1571/*
   1572 * Search the first expiring timer in the various clock levels. Caller must
   1573 * hold base->lock.
   1574 */
   1575static unsigned long __next_timer_interrupt(struct timer_base *base)
   1576{
   1577	unsigned long clk, next, adj;
   1578	unsigned lvl, offset = 0;
   1579
   1580	next = base->clk + NEXT_TIMER_MAX_DELTA;
   1581	clk = base->clk;
   1582	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
   1583		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
   1584		unsigned long lvl_clk = clk & LVL_CLK_MASK;
   1585
   1586		if (pos >= 0) {
   1587			unsigned long tmp = clk + (unsigned long) pos;
   1588
   1589			tmp <<= LVL_SHIFT(lvl);
   1590			if (time_before(tmp, next))
   1591				next = tmp;
   1592
   1593			/*
   1594			 * If the next expiration happens before we reach
   1595			 * the next level, no need to check further.
   1596			 */
   1597			if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
   1598				break;
   1599		}
   1600		/*
   1601		 * Clock for the next level. If the current level clock lower
   1602		 * bits are zero, we look at the next level as is. If not we
   1603		 * need to advance it by one because that's going to be the
   1604		 * next expiring bucket in that level. base->clk is the next
    1605		 * expiring jiffy. So in case of:
   1606		 *
   1607		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
   1608		 *  0    0    0    0    0    0
   1609		 *
   1610		 * we have to look at all levels @index 0. With
   1611		 *
   1612		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
   1613		 *  0    0    0    0    0    2
   1614		 *
   1615		 * LVL0 has the next expiring bucket @index 2. The upper
   1616		 * levels have the next expiring bucket @index 1.
   1617		 *
   1618		 * In case that the propagation wraps the next level the same
   1619		 * rules apply:
   1620		 *
   1621		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
   1622		 *  0    0    0    0    F    2
   1623		 *
   1624		 * So after looking at LVL0 we get:
   1625		 *
   1626		 * LVL5 LVL4 LVL3 LVL2 LVL1
   1627		 *  0    0    0    1    0
   1628		 *
   1629		 * So no propagation from LVL1 to LVL2 because that happened
   1630		 * with the add already, but then we need to propagate further
   1631		 * from LVL2 to LVL3.
   1632		 *
   1633		 * So the simple check whether the lower bits of the current
   1634		 * level are 0 or not is sufficient for all cases.
   1635		 */
   1636		adj = lvl_clk ? 1 : 0;
   1637		clk >>= LVL_CLK_SHIFT;
   1638		clk += adj;
   1639	}
   1640
   1641	base->next_expiry_recalc = false;
   1642	base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
   1643
   1644	return next;
   1645}
   1646
   1647#ifdef CONFIG_NO_HZ_COMMON
   1648/*
   1649 * Check, if the next hrtimer event is before the next timer wheel
   1650 * event:
   1651 */
   1652static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
   1653{
   1654	u64 nextevt = hrtimer_get_next_event();
   1655
   1656	/*
    1657	 * If high resolution timers are enabled,
   1658	 * hrtimer_get_next_event() returns KTIME_MAX.
   1659	 */
   1660	if (expires <= nextevt)
   1661		return expires;
   1662
   1663	/*
   1664	 * If the next timer is already expired, return the tick base
   1665	 * time so the tick is fired immediately.
   1666	 */
   1667	if (nextevt <= basem)
   1668		return basem;
   1669
   1670	/*
    1671	 * Round up to the next jiffy. High resolution timers are
   1672	 * off, so the hrtimers are expired in the tick and we need to
   1673	 * make sure that this tick really expires the timer to avoid
   1674	 * a ping pong of the nohz stop code.
   1675	 *
   1676	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
   1677	 */
   1678	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
   1679}
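
        /*
         * Illustrative numbers (assuming HZ == 250, so TICK_NSEC ==
         * 4000000): a next hrtimer event at nextevt == 5000000 ns is
         * rounded up to DIV_ROUND_UP_ULL(5000000, 4000000) * 4000000 ==
         * 8000000 ns, the next tick boundary, so the tick that fires
         * there really does expire the hrtimer.
         */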
   1680
   1681/**
   1682 * get_next_timer_interrupt - return the time (clock mono) of the next timer
   1683 * @basej:	base time jiffies
   1684 * @basem:	base time clock monotonic
   1685 *
   1686 * Returns the tick aligned clock monotonic time of the next pending
   1687 * timer or KTIME_MAX if no timer is pending.
   1688 */
   1689u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
   1690{
   1691	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
   1692	u64 expires = KTIME_MAX;
   1693	unsigned long nextevt;
   1694
   1695	/*
   1696	 * Pretend that there is no timer pending if the cpu is offline.
   1697	 * Possible pending timers will be migrated later to an active cpu.
   1698	 */
   1699	if (cpu_is_offline(smp_processor_id()))
   1700		return expires;
   1701
   1702	raw_spin_lock(&base->lock);
   1703	if (base->next_expiry_recalc)
   1704		base->next_expiry = __next_timer_interrupt(base);
   1705	nextevt = base->next_expiry;
   1706
   1707	/*
   1708	 * We have a fresh next event. Check whether we can forward the
   1709	 * base. We can only do that when @basej is past base->clk
   1710	 * otherwise we might rewind base->clk.
   1711	 */
   1712	if (time_after(basej, base->clk)) {
   1713		if (time_after(nextevt, basej))
   1714			base->clk = basej;
   1715		else if (time_after(nextevt, base->clk))
   1716			base->clk = nextevt;
   1717	}
   1718
   1719	if (time_before_eq(nextevt, basej)) {
   1720		expires = basem;
   1721		base->is_idle = false;
   1722	} else {
   1723		if (base->timers_pending)
   1724			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
   1725		/*
   1726		 * If we expect to sleep more than a tick, mark the base idle.
   1727		 * Also the tick is stopped so any added timer must forward
   1728		 * the base clk itself to keep granularity small. This idle
   1729		 * logic is only maintained for the BASE_STD base, deferrable
    1730		 * logic is only maintained for the BASE_STD base; deferrable
   1731		 */
   1732		if ((expires - basem) > TICK_NSEC)
   1733			base->is_idle = true;
   1734	}
   1735	raw_spin_unlock(&base->lock);
   1736
   1737	return cmp_next_hrtimer_event(basem, expires);
   1738}
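
        /*
         * Illustrative example: with basej == 1000, a wheel timer pending
         * at nextevt == 1016 and basem == t ns, this returns
         * t + 16 * TICK_NSEC, unless cmp_next_hrtimer_event() finds an
         * earlier hrtimer event.
         */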
   1739
   1740/**
   1741 * timer_clear_idle - Clear the idle state of the timer base
   1742 *
   1743 * Called with interrupts disabled
   1744 */
   1745void timer_clear_idle(void)
   1746{
   1747	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
   1748
   1749	/*
   1750	 * We do this unlocked. The worst outcome is a remote enqueue sending
   1751	 * a pointless IPI, but taking the lock would just make the window for
   1752	 * sending the IPI a few instructions smaller for the cost of taking
   1753	 * the lock in the exit from idle path.
   1754	 */
   1755	base->is_idle = false;
   1756}
   1757#endif
   1758
   1759/**
   1760 * __run_timers - run all expired timers (if any) on this CPU.
   1761 * @base: the timer vector to be processed.
   1762 */
   1763static inline void __run_timers(struct timer_base *base)
   1764{
   1765	struct hlist_head heads[LVL_DEPTH];
   1766	int levels;
   1767
   1768	if (time_before(jiffies, base->next_expiry))
   1769		return;
   1770
   1771	timer_base_lock_expiry(base);
   1772	raw_spin_lock_irq(&base->lock);
   1773
   1774	while (time_after_eq(jiffies, base->clk) &&
   1775	       time_after_eq(jiffies, base->next_expiry)) {
   1776		levels = collect_expired_timers(base, heads);
   1777		/*
   1778		 * The two possible reasons for not finding any expired
   1779		 * timer at this clk are that all matching timers have been
   1780		 * dequeued or no timer has been queued since
   1781		 * base::next_expiry was set to base::clk +
   1782		 * NEXT_TIMER_MAX_DELTA.
   1783		 */
   1784		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
   1785			     && base->timers_pending);
   1786		base->clk++;
   1787		base->next_expiry = __next_timer_interrupt(base);
   1788
   1789		while (levels--)
   1790			expire_timers(base, heads + levels);
   1791	}
   1792	raw_spin_unlock_irq(&base->lock);
   1793	timer_base_unlock_expiry(base);
   1794}
   1795
   1796/*
    1797 * This function runs expired timers in bottom half (softirq) context.
   1798 */
   1799static __latent_entropy void run_timer_softirq(struct softirq_action *h)
   1800{
   1801	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
   1802
   1803	__run_timers(base);
   1804	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
   1805		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
   1806}
   1807
   1808/*
   1809 * Called by the local, per-CPU timer interrupt on SMP.
   1810 */
   1811static void run_local_timers(void)
   1812{
   1813	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
   1814
   1815	hrtimer_run_queues();
   1816	/* Raise the softirq only if required. */
   1817	if (time_before(jiffies, base->next_expiry)) {
   1818		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
   1819			return;
   1820		/* CPU is awake, so check the deferrable base. */
   1821		base++;
   1822		if (time_before(jiffies, base->next_expiry))
   1823			return;
   1824	}
   1825	raise_softirq(TIMER_SOFTIRQ);
   1826}
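
        /*
         * Illustrative timing: with base->next_expiry == jiffies + 3, the
         * next three ticks return early above; the softirq is raised only
         * once jiffies has caught up with next_expiry (or, on
         * NO_HZ_COMMON kernels, with the deferrable base's next_expiry).
         */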
   1827
   1828/*
   1829 * Called from the timer interrupt handler to charge one tick to the current
   1830 * process.  user_tick is 1 if the tick is user time, 0 for system.
   1831 */
   1832void update_process_times(int user_tick)
   1833{
   1834	struct task_struct *p = current;
   1835
   1836	/* Note: this timer irq context must be accounted for as well. */
   1837	account_process_tick(p, user_tick);
   1838	run_local_timers();
   1839	rcu_sched_clock_irq(user_tick);
   1840#ifdef CONFIG_IRQ_WORK
   1841	if (in_irq())
   1842		irq_work_tick();
   1843#endif
   1844	scheduler_tick();
   1845	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
   1846		run_posix_cpu_timers();
   1847}
   1848
   1849/*
   1850 * Since schedule_timeout()'s timer is defined on the stack, it must store
   1851 * the target task on the stack as well.
   1852 */
   1853struct process_timer {
   1854	struct timer_list timer;
   1855	struct task_struct *task;
   1856};
   1857
   1858static void process_timeout(struct timer_list *t)
   1859{
   1860	struct process_timer *timeout = from_timer(timeout, t, timer);
   1861
   1862	wake_up_process(timeout->task);
   1863}
   1864
   1865/**
   1866 * schedule_timeout - sleep until timeout
   1867 * @timeout: timeout value in jiffies
   1868 *
   1869 * Make the current task sleep until @timeout jiffies have elapsed.
   1870 * The function behavior depends on the current task state
   1871 * (see also set_current_state() description):
   1872 *
   1873 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
   1874 * at all. That happens because sched_submit_work() does nothing for
   1875 * tasks in %TASK_RUNNING state.
   1876 *
   1877 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
   1878 * pass before the routine returns unless the current task is explicitly
    1879 * woken up (e.g. by wake_up_process()).
   1880 *
   1881 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
   1882 * delivered to the current task or the current task is explicitly woken
   1883 * up.
   1884 *
   1885 * The current task state is guaranteed to be %TASK_RUNNING when this
   1886 * routine returns.
   1887 *
   1888 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
   1889 * the CPU away without a bound on the timeout. In this case the return
   1890 * value will be %MAX_SCHEDULE_TIMEOUT.
   1891 *
    1892 * Returns 0 when the timer has expired, otherwise the remaining time in
   1893 * jiffies will be returned. In all cases the return value is guaranteed
   1894 * to be non-negative.
   1895 */
   1896signed long __sched schedule_timeout(signed long timeout)
   1897{
   1898	struct process_timer timer;
   1899	unsigned long expire;
   1900
    1901	switch (timeout) {
   1903	case MAX_SCHEDULE_TIMEOUT:
   1904		/*
    1905		 * These two special cases are useful to be comfortable
    1906		 * in the caller. Nothing more. We could have taken
    1907		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
    1908		 * but returning a valid offset (>= 0) allows the caller
    1909		 * to do whatever it wants with the retval.
   1910		 */
   1911		schedule();
   1912		goto out;
   1913	default:
   1914		/*
    1915		 * Another bit of PARANOID. Note that the retval will be
    1916		 * 0 since no piece of kernel code is supposed to check
    1917		 * for a negative retval of schedule_timeout() (it should
    1918		 * never happen anyway). The printk() below will tell you
    1919		 * if something has gone wrong and where.
   1920		 */
   1921		if (timeout < 0) {
    1922			printk(KERN_ERR "schedule_timeout: wrong timeout value %lx\n",
    1923			       timeout);
   1924			dump_stack();
   1925			__set_current_state(TASK_RUNNING);
   1926			goto out;
   1927		}
   1928	}
   1929
   1930	expire = timeout + jiffies;
   1931
   1932	timer.task = current;
   1933	timer_setup_on_stack(&timer.timer, process_timeout, 0);
   1934	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
   1935	schedule();
   1936	del_singleshot_timer_sync(&timer.timer);
   1937
   1938	/* Remove the timer from the object tracker */
   1939	destroy_timer_on_stack(&timer.timer);
   1940
   1941	timeout = expire - jiffies;
   1942
   1943 out:
   1944	return timeout < 0 ? 0 : timeout;
   1945}
   1946EXPORT_SYMBOL(schedule_timeout);
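
        /*
         * Typical usage sketch (illustrative): sleep for up to one second,
         * waking early on an explicit wake_up_process() or a signal:
         *
         *	set_current_state(TASK_INTERRUPTIBLE);
         *	remaining = schedule_timeout(HZ);
         *
         * A non-zero "remaining" means the task was woken before the
         * timeout expired; zero means the full second elapsed.
         */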
   1947
   1948/*
   1949 * We can use __set_current_state() here because schedule_timeout() calls
   1950 * schedule() unconditionally.
   1951 */
   1952signed long __sched schedule_timeout_interruptible(signed long timeout)
   1953{
   1954	__set_current_state(TASK_INTERRUPTIBLE);
   1955	return schedule_timeout(timeout);
   1956}
   1957EXPORT_SYMBOL(schedule_timeout_interruptible);
   1958
   1959signed long __sched schedule_timeout_killable(signed long timeout)
   1960{
   1961	__set_current_state(TASK_KILLABLE);
   1962	return schedule_timeout(timeout);
   1963}
   1964EXPORT_SYMBOL(schedule_timeout_killable);
   1965
   1966signed long __sched schedule_timeout_uninterruptible(signed long timeout)
   1967{
   1968	__set_current_state(TASK_UNINTERRUPTIBLE);
   1969	return schedule_timeout(timeout);
   1970}
   1971EXPORT_SYMBOL(schedule_timeout_uninterruptible);
   1972
   1973/*
   1974 * Like schedule_timeout_uninterruptible(), except this task will not contribute
   1975 * to load average.
   1976 */
   1977signed long __sched schedule_timeout_idle(signed long timeout)
   1978{
   1979	__set_current_state(TASK_IDLE);
   1980	return schedule_timeout(timeout);
   1981}
   1982EXPORT_SYMBOL(schedule_timeout_idle);
   1983
   1984#ifdef CONFIG_HOTPLUG_CPU
   1985static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
   1986{
   1987	struct timer_list *timer;
   1988	int cpu = new_base->cpu;
   1989
   1990	while (!hlist_empty(head)) {
   1991		timer = hlist_entry(head->first, struct timer_list, entry);
   1992		detach_timer(timer, false);
   1993		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
   1994		internal_add_timer(new_base, timer);
   1995	}
   1996}
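
        /*
         * Note: rewriting the CPU bits in timer->flags above is what makes
         * a later mod_timer()/del_timer() on a migrated timer look up
         * @new_base instead of the dead CPU's base.
         */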
   1997
   1998int timers_prepare_cpu(unsigned int cpu)
   1999{
   2000	struct timer_base *base;
   2001	int b;
   2002
   2003	for (b = 0; b < NR_BASES; b++) {
   2004		base = per_cpu_ptr(&timer_bases[b], cpu);
   2005		base->clk = jiffies;
   2006		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
   2007		base->next_expiry_recalc = false;
   2008		base->timers_pending = false;
   2009		base->is_idle = false;
   2010	}
   2011	return 0;
   2012}
   2013
   2014int timers_dead_cpu(unsigned int cpu)
   2015{
   2016	struct timer_base *old_base;
   2017	struct timer_base *new_base;
   2018	int b, i;
   2019
   2020	BUG_ON(cpu_online(cpu));
   2021
   2022	for (b = 0; b < NR_BASES; b++) {
   2023		old_base = per_cpu_ptr(&timer_bases[b], cpu);
   2024		new_base = get_cpu_ptr(&timer_bases[b]);
   2025		/*
   2026		 * The caller is globally serialized and nobody else
    2027		 * takes two locks at once, so deadlock is not possible.
   2028		 */
   2029		raw_spin_lock_irq(&new_base->lock);
   2030		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
   2031
   2032		/*
    2033		 * The current CPU's base clock might be stale. Update it
   2034		 * before moving the timers over.
   2035		 */
   2036		forward_timer_base(new_base);
   2037
   2038		BUG_ON(old_base->running_timer);
   2039
   2040		for (i = 0; i < WHEEL_SIZE; i++)
   2041			migrate_timer_list(new_base, old_base->vectors + i);
   2042
   2043		raw_spin_unlock(&old_base->lock);
   2044		raw_spin_unlock_irq(&new_base->lock);
   2045		put_cpu_ptr(&timer_bases);
   2046	}
   2047	return 0;
   2048}
   2049
   2050#endif /* CONFIG_HOTPLUG_CPU */
   2051
   2052static void __init init_timer_cpu(int cpu)
   2053{
   2054	struct timer_base *base;
   2055	int i;
   2056
   2057	for (i = 0; i < NR_BASES; i++) {
   2058		base = per_cpu_ptr(&timer_bases[i], cpu);
   2059		base->cpu = cpu;
   2060		raw_spin_lock_init(&base->lock);
   2061		base->clk = jiffies;
   2062		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
   2063		timer_base_init_expiry_lock(base);
   2064	}
   2065}
   2066
   2067static void __init init_timer_cpus(void)
   2068{
   2069	int cpu;
   2070
   2071	for_each_possible_cpu(cpu)
   2072		init_timer_cpu(cpu);
   2073}
   2074
   2075void __init init_timers(void)
   2076{
   2077	init_timer_cpus();
   2078	posix_cputimers_init_work();
   2079	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
   2080}
   2081
   2082/**
   2083 * msleep - sleep safely even with waitqueue interruptions
   2084 * @msecs: Time in milliseconds to sleep for
   2085 */
   2086void msleep(unsigned int msecs)
   2087{
   2088	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
   2089
   2090	while (timeout)
   2091		timeout = schedule_timeout_uninterruptible(timeout);
   2092}
    2094EXPORT_SYMBOL(msleep);
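
        /*
         * Note on the "+ 1" above: msecs_to_jiffies() rounds up, but the
         * current jiffy may be almost over when msleep() is called, so one
         * extra jiffy is added to guarantee a sleep of at least @msecs.
         * msleep(1) can therefore sleep for up to two jiffies (20 ms at
         * HZ == 100), which is why very short sleeps are better served by
         * usleep_range().
         */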
   2095
   2096/**
   2097 * msleep_interruptible - sleep waiting for signals
   2098 * @msecs: Time in milliseconds to sleep for
   2099 */
   2100unsigned long msleep_interruptible(unsigned int msecs)
   2101{
   2102	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
   2103
   2104	while (timeout && !signal_pending(current))
   2105		timeout = schedule_timeout_interruptible(timeout);
   2106	return jiffies_to_msecs(timeout);
   2107}
    2109EXPORT_SYMBOL(msleep_interruptible);
   2110
   2111/**
   2112 * usleep_range_state - Sleep for an approximate time in a given state
   2113 * @min:	Minimum time in usecs to sleep
   2114 * @max:	Maximum time in usecs to sleep
    2115 * @state:	State the current task will be in while sleeping
   2116 *
   2117 * In non-atomic context where the exact wakeup time is flexible, use
   2118 * usleep_range_state() instead of udelay().  The sleep improves responsiveness
   2119 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
   2120 * power usage by allowing hrtimers to take advantage of an already-
   2121 * scheduled interrupt instead of scheduling a new one just for this sleep.
   2122 */
   2123void __sched usleep_range_state(unsigned long min, unsigned long max,
   2124				unsigned int state)
   2125{
   2126	ktime_t exp = ktime_add_us(ktime_get(), min);
   2127	u64 delta = (u64)(max - min) * NSEC_PER_USEC;
   2128
   2129	for (;;) {
   2130		__set_current_state(state);
   2131		/* Do not return before the requested sleep time has elapsed */
   2132		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
   2133			break;
   2134	}
   2135}
   2136EXPORT_SYMBOL(usleep_range_state);
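
        /*
         * Illustrative usage: the usleep_range() wrapper (declared in
         * include/linux/delay.h) calls this with TASK_UNINTERRUPTIBLE,
         * e.g. usleep_range(100, 200) sleeps for 100-200 us. The slack
         * between @min and @max lets the hrtimer subsystem coalesce the
         * wakeup with an already-programmed interrupt instead of forcing
         * a new one.
         */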