cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (51421B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * drivers/base/power/main.c - Where the driver meets power management.
      4 *
      5 * Copyright (c) 2003 Patrick Mochel
      6 * Copyright (c) 2003 Open Source Development Lab
      7 *
      8 * The driver model core calls device_pm_add() when a device is registered.
      9 * This will initialize the embedded device_pm_info object in the device
     10 * and add it to the list of power-controlled devices. sysfs entries for
     11 * controlling device power management will also be added.
     12 *
     13 * A separate list is used for keeping track of power info, because the power
     14 * domain dependencies may differ from the ancestral dependencies that the
     15 * subsystem list maintains.
     16 */
     17
     18#define pr_fmt(fmt) "PM: " fmt
     19#define dev_fmt pr_fmt
     20
     21#include <linux/device.h>
     22#include <linux/export.h>
     23#include <linux/mutex.h>
     24#include <linux/pm.h>
     25#include <linux/pm_runtime.h>
     26#include <linux/pm-trace.h>
     27#include <linux/pm_wakeirq.h>
     28#include <linux/interrupt.h>
     29#include <linux/sched.h>
     30#include <linux/sched/debug.h>
     31#include <linux/async.h>
     32#include <linux/suspend.h>
     33#include <trace/events/power.h>
     34#include <linux/cpufreq.h>
     35#include <linux/devfreq.h>
     36#include <linux/timer.h>
     37
     38#include "../base.h"
     39#include "power.h"
     40
     41typedef int (*pm_callback_t)(struct device *);
     42
     43#define list_for_each_entry_rcu_locked(pos, head, member) \
     44	list_for_each_entry_rcu(pos, head, member, \
     45			device_links_read_lock_held())
     46
     47/*
      48 * The entries in dpm_list are in depth-first order, simply
     49 * because children are guaranteed to be discovered after parents, and
     50 * are inserted at the back of the list on discovery.
     51 *
     52 * Since device_pm_add() may be called with a device lock held,
     53 * we must never try to acquire a device lock while holding
     54 * dpm_list_mutex.
     55 */
     56
     57LIST_HEAD(dpm_list);
     58static LIST_HEAD(dpm_prepared_list);
     59static LIST_HEAD(dpm_suspended_list);
     60static LIST_HEAD(dpm_late_early_list);
     61static LIST_HEAD(dpm_noirq_list);
     62
     63struct suspend_stats suspend_stats;
     64static DEFINE_MUTEX(dpm_list_mtx);
     65static pm_message_t pm_transition;
     66
     67static int async_error;
     68
     69static const char *pm_verb(int event)
     70{
     71	switch (event) {
     72	case PM_EVENT_SUSPEND:
     73		return "suspend";
     74	case PM_EVENT_RESUME:
     75		return "resume";
     76	case PM_EVENT_FREEZE:
     77		return "freeze";
     78	case PM_EVENT_QUIESCE:
     79		return "quiesce";
     80	case PM_EVENT_HIBERNATE:
     81		return "hibernate";
     82	case PM_EVENT_THAW:
     83		return "thaw";
     84	case PM_EVENT_RESTORE:
     85		return "restore";
     86	case PM_EVENT_RECOVER:
     87		return "recover";
     88	default:
     89		return "(unknown PM event)";
     90	}
     91}
     92
     93/**
     94 * device_pm_sleep_init - Initialize system suspend-related device fields.
     95 * @dev: Device object being initialized.
     96 */
     97void device_pm_sleep_init(struct device *dev)
     98{
     99	dev->power.is_prepared = false;
    100	dev->power.is_suspended = false;
    101	dev->power.is_noirq_suspended = false;
    102	dev->power.is_late_suspended = false;
    103	init_completion(&dev->power.completion);
    104	complete_all(&dev->power.completion);
    105	dev->power.wakeup = NULL;
    106	INIT_LIST_HEAD(&dev->power.entry);
    107}
    108
    109/**
    110 * device_pm_lock - Lock the list of active devices used by the PM core.
    111 */
    112void device_pm_lock(void)
    113{
    114	mutex_lock(&dpm_list_mtx);
    115}
    116
    117/**
    118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
    119 */
    120void device_pm_unlock(void)
    121{
    122	mutex_unlock(&dpm_list_mtx);
    123}
    124
    125/**
    126 * device_pm_add - Add a device to the PM core's list of active devices.
    127 * @dev: Device to add to the list.
    128 */
    129void device_pm_add(struct device *dev)
    130{
    131	/* Skip PM setup/initialization. */
    132	if (device_pm_not_required(dev))
    133		return;
    134
    135	pr_debug("Adding info for %s:%s\n",
    136		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
    137	device_pm_check_callbacks(dev);
    138	mutex_lock(&dpm_list_mtx);
    139	if (dev->parent && dev->parent->power.is_prepared)
    140		dev_warn(dev, "parent %s should not be sleeping\n",
    141			dev_name(dev->parent));
    142	list_add_tail(&dev->power.entry, &dpm_list);
    143	dev->power.in_dpm_list = true;
    144	mutex_unlock(&dpm_list_mtx);
    145}
    146
    147/**
    148 * device_pm_remove - Remove a device from the PM core's list of active devices.
    149 * @dev: Device to be removed from the list.
    150 */
    151void device_pm_remove(struct device *dev)
    152{
    153	if (device_pm_not_required(dev))
    154		return;
    155
    156	pr_debug("Removing info for %s:%s\n",
    157		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
    158	complete_all(&dev->power.completion);
    159	mutex_lock(&dpm_list_mtx);
    160	list_del_init(&dev->power.entry);
    161	dev->power.in_dpm_list = false;
    162	mutex_unlock(&dpm_list_mtx);
    163	device_wakeup_disable(dev);
    164	pm_runtime_remove(dev);
    165	device_pm_check_callbacks(dev);
    166}
    167
    168/**
    169 * device_pm_move_before - Move device in the PM core's list of active devices.
    170 * @deva: Device to move in dpm_list.
    171 * @devb: Device @deva should come before.
    172 */
    173void device_pm_move_before(struct device *deva, struct device *devb)
    174{
    175	pr_debug("Moving %s:%s before %s:%s\n",
    176		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
    177		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
    178	/* Delete deva from dpm_list and reinsert before devb. */
    179	list_move_tail(&deva->power.entry, &devb->power.entry);
    180}
    181
    182/**
    183 * device_pm_move_after - Move device in the PM core's list of active devices.
    184 * @deva: Device to move in dpm_list.
    185 * @devb: Device @deva should come after.
    186 */
    187void device_pm_move_after(struct device *deva, struct device *devb)
    188{
    189	pr_debug("Moving %s:%s after %s:%s\n",
    190		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
    191		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
    192	/* Delete deva from dpm_list and reinsert after devb. */
    193	list_move(&deva->power.entry, &devb->power.entry);
    194}
    195
    196/**
    197 * device_pm_move_last - Move device to end of the PM core's list of devices.
    198 * @dev: Device to move in dpm_list.
    199 */
    200void device_pm_move_last(struct device *dev)
    201{
    202	pr_debug("Moving %s:%s to end of list\n",
    203		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
    204	list_move_tail(&dev->power.entry, &dpm_list);
    205}
    206
    207static ktime_t initcall_debug_start(struct device *dev, void *cb)
    208{
    209	if (!pm_print_times_enabled)
    210		return 0;
    211
    212	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
    213		 task_pid_nr(current),
    214		 dev->parent ? dev_name(dev->parent) : "none");
    215	return ktime_get();
    216}
    217
    218static void initcall_debug_report(struct device *dev, ktime_t calltime,
    219				  void *cb, int error)
    220{
    221	ktime_t rettime;
    222
    223	if (!pm_print_times_enabled)
    224		return;
    225
    226	rettime = ktime_get();
    227	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
    228		 (unsigned long long)ktime_us_delta(rettime, calltime));
    229}
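
/*
 * Usage sketch (an assumption about the surrounding kernel config, not
 * stated in this file): the two helpers above only log when
 * pm_print_times_enabled is set, which is normally toggled from user space
 * via the sysfs knob (assuming CONFIG_PM_SLEEP_DEBUG):
 *
 *	echo 1 > /sys/power/pm_print_times
 *
 * so that every device callback is reported with the calling PID and its
 * duration in microseconds.
 */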
    230
    231/**
    232 * dpm_wait - Wait for a PM operation to complete.
    233 * @dev: Device to wait for.
    234 * @async: If unset, wait only if the device's power.async_suspend flag is set.
    235 */
    236static void dpm_wait(struct device *dev, bool async)
    237{
    238	if (!dev)
    239		return;
    240
    241	if (async || (pm_async_enabled && dev->power.async_suspend))
    242		wait_for_completion(&dev->power.completion);
    243}
    244
    245static int dpm_wait_fn(struct device *dev, void *async_ptr)
    246{
    247	dpm_wait(dev, *((bool *)async_ptr));
    248	return 0;
    249}
    250
    251static void dpm_wait_for_children(struct device *dev, bool async)
    252{
     253	device_for_each_child(dev, &async, dpm_wait_fn);
    254}
    255
    256static void dpm_wait_for_suppliers(struct device *dev, bool async)
    257{
    258	struct device_link *link;
    259	int idx;
    260
    261	idx = device_links_read_lock();
    262
    263	/*
    264	 * If the supplier goes away right after we've checked the link to it,
    265	 * we'll wait for its completion to change the state, but that's fine,
    266	 * because the only things that will block as a result are the SRCU
    267	 * callbacks freeing the link objects for the links in the list we're
    268	 * walking.
    269	 */
    270	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
    271		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
    272			dpm_wait(link->supplier, async);
    273
    274	device_links_read_unlock(idx);
    275}
    276
    277static bool dpm_wait_for_superior(struct device *dev, bool async)
    278{
    279	struct device *parent;
    280
    281	/*
    282	 * If the device is resumed asynchronously and the parent's callback
    283	 * deletes both the device and the parent itself, the parent object may
    284	 * be freed while this function is running, so avoid that by reference
    285	 * counting the parent once more unless the device has been deleted
    286	 * already (in which case return right away).
    287	 */
    288	mutex_lock(&dpm_list_mtx);
    289
    290	if (!device_pm_initialized(dev)) {
    291		mutex_unlock(&dpm_list_mtx);
    292		return false;
    293	}
    294
    295	parent = get_device(dev->parent);
    296
    297	mutex_unlock(&dpm_list_mtx);
    298
    299	dpm_wait(parent, async);
    300	put_device(parent);
    301
    302	dpm_wait_for_suppliers(dev, async);
    303
    304	/*
    305	 * If the parent's callback has deleted the device, attempting to resume
    306	 * it would be invalid, so avoid doing that then.
    307	 */
    308	return device_pm_initialized(dev);
    309}
    310
    311static void dpm_wait_for_consumers(struct device *dev, bool async)
    312{
    313	struct device_link *link;
    314	int idx;
    315
    316	idx = device_links_read_lock();
    317
    318	/*
    319	 * The status of a device link can only be changed from "dormant" by a
    320	 * probe, but that cannot happen during system suspend/resume.  In
    321	 * theory it can change to "dormant" at that time, but then it is
     322	 * reasonable to wait for the target device anyway (e.g. if it goes
    323	 * away, it's better to wait for it to go away completely and then
    324	 * continue instead of trying to continue in parallel with its
    325	 * unregistration).
    326	 */
    327	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
    328		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
    329			dpm_wait(link->consumer, async);
    330
    331	device_links_read_unlock(idx);
    332}
    333
    334static void dpm_wait_for_subordinate(struct device *dev, bool async)
    335{
    336	dpm_wait_for_children(dev, async);
    337	dpm_wait_for_consumers(dev, async);
    338}
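
/*
 * A minimal sketch of where the supplier/consumer edges waited on above come
 * from; corge_bind_supplier() is a hypothetical helper, not part of this
 * file.  A consumer driver links itself to its supplier with
 * device_link_add(), and the PM core then orders suspend/resume across that
 * edge the same way it does across the parent/child relationship.
 */
static void corge_bind_supplier(struct device *consumer,
				struct device *supplier)
{
	/* A NULL return means the link could not be created. */
	if (!device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER))
		dev_warn(consumer, "failed to link supplier %s\n",
			 dev_name(supplier));
}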
    339
    340/**
    341 * pm_op - Return the PM operation appropriate for given PM event.
    342 * @ops: PM operations to choose from.
    343 * @state: PM transition of the system being carried out.
    344 */
    345static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
    346{
    347	switch (state.event) {
    348#ifdef CONFIG_SUSPEND
    349	case PM_EVENT_SUSPEND:
    350		return ops->suspend;
    351	case PM_EVENT_RESUME:
    352		return ops->resume;
    353#endif /* CONFIG_SUSPEND */
    354#ifdef CONFIG_HIBERNATE_CALLBACKS
    355	case PM_EVENT_FREEZE:
    356	case PM_EVENT_QUIESCE:
    357		return ops->freeze;
    358	case PM_EVENT_HIBERNATE:
    359		return ops->poweroff;
    360	case PM_EVENT_THAW:
    361	case PM_EVENT_RECOVER:
    362		return ops->thaw;
    363	case PM_EVENT_RESTORE:
    364		return ops->restore;
    365#endif /* CONFIG_HIBERNATE_CALLBACKS */
    366	}
    367
    368	return NULL;
    369}
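
/*
 * A minimal sketch of the callback table pm_op() indexes into; the foo_*
 * names are hypothetical and not part of this file.  For PM_EVENT_SUSPEND
 * pm_op() returns .suspend, for PM_EVENT_RESUME it returns .resume, and the
 * remaining events map onto the hibernation callbacks as in the switch above.
 */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the hypothetical device here. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Re-initialize the hypothetical device here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,	/* PM_EVENT_SUSPEND */
	.resume		= foo_resume,	/* PM_EVENT_RESUME */
	.freeze		= foo_suspend,	/* PM_EVENT_FREEZE / PM_EVENT_QUIESCE */
	.thaw		= foo_resume,	/* PM_EVENT_THAW / PM_EVENT_RECOVER */
	.poweroff	= foo_suspend,	/* PM_EVENT_HIBERNATE */
	.restore	= foo_resume,	/* PM_EVENT_RESTORE */
};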
    370
    371/**
    372 * pm_late_early_op - Return the PM operation appropriate for given PM event.
    373 * @ops: PM operations to choose from.
    374 * @state: PM transition of the system being carried out.
    375 *
     376 * Runtime PM is disabled for the device while the returned callback runs.
    377 */
    378static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
    379				      pm_message_t state)
    380{
    381	switch (state.event) {
    382#ifdef CONFIG_SUSPEND
    383	case PM_EVENT_SUSPEND:
    384		return ops->suspend_late;
    385	case PM_EVENT_RESUME:
    386		return ops->resume_early;
    387#endif /* CONFIG_SUSPEND */
    388#ifdef CONFIG_HIBERNATE_CALLBACKS
    389	case PM_EVENT_FREEZE:
    390	case PM_EVENT_QUIESCE:
    391		return ops->freeze_late;
    392	case PM_EVENT_HIBERNATE:
    393		return ops->poweroff_late;
    394	case PM_EVENT_THAW:
    395	case PM_EVENT_RECOVER:
    396		return ops->thaw_early;
    397	case PM_EVENT_RESTORE:
    398		return ops->restore_early;
    399#endif /* CONFIG_HIBERNATE_CALLBACKS */
    400	}
    401
    402	return NULL;
    403}
    404
    405/**
    406 * pm_noirq_op - Return the PM operation appropriate for given PM event.
    407 * @ops: PM operations to choose from.
    408 * @state: PM transition of the system being carried out.
    409 *
     410 * The driver of the device will not receive interrupts while the callback
     411 * returned by this function is executed.
    412 */
    413static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
    414{
    415	switch (state.event) {
    416#ifdef CONFIG_SUSPEND
    417	case PM_EVENT_SUSPEND:
    418		return ops->suspend_noirq;
    419	case PM_EVENT_RESUME:
    420		return ops->resume_noirq;
    421#endif /* CONFIG_SUSPEND */
    422#ifdef CONFIG_HIBERNATE_CALLBACKS
    423	case PM_EVENT_FREEZE:
    424	case PM_EVENT_QUIESCE:
    425		return ops->freeze_noirq;
    426	case PM_EVENT_HIBERNATE:
    427		return ops->poweroff_noirq;
    428	case PM_EVENT_THAW:
    429	case PM_EVENT_RECOVER:
    430		return ops->thaw_noirq;
    431	case PM_EVENT_RESTORE:
    432		return ops->restore_noirq;
    433#endif /* CONFIG_HIBERNATE_CALLBACKS */
    434	}
    435
    436	return NULL;
    437}
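
/*
 * A sketch of how the three selectors above cover one driver across the
 * suspend/resume phases; the bar_* callbacks are hypothetical and not part
 * of this file.  During suspend the core invokes, in order, the callback
 * chosen by pm_op() (.suspend), then pm_late_early_op() (.suspend_late) and
 * finally pm_noirq_op() (.suspend_noirq); resume runs the mirror image.
 */
static int bar_suspend(struct device *dev)		{ return 0; }
static int bar_suspend_late(struct device *dev)		{ return 0; }
static int bar_suspend_noirq(struct device *dev)	{ return 0; }
static int bar_resume_noirq(struct device *dev)		{ return 0; }
static int bar_resume_early(struct device *dev)		{ return 0; }
static int bar_resume(struct device *dev)		{ return 0; }

static const struct dev_pm_ops bar_pm_ops = {
	.suspend	= bar_suspend,
	.suspend_late	= bar_suspend_late,
	.suspend_noirq	= bar_suspend_noirq,
	.resume_noirq	= bar_resume_noirq,
	.resume_early	= bar_resume_early,
	.resume		= bar_resume,
};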
    438
    439static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
    440{
    441	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
    442		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
    443		", may wakeup" : "", dev->power.driver_flags);
    444}
    445
    446static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
    447			int error)
    448{
    449	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
    450		error);
    451}
    452
    453static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
    454			  const char *info)
    455{
    456	ktime_t calltime;
    457	u64 usecs64;
    458	int usecs;
    459
    460	calltime = ktime_get();
    461	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
    462	do_div(usecs64, NSEC_PER_USEC);
    463	usecs = usecs64;
    464	if (usecs == 0)
    465		usecs = 1;
    466
    467	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
    468		  info ?: "", info ? " " : "", pm_verb(state.event),
    469		  error ? "aborted" : "complete",
    470		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
    471}
    472
    473static int dpm_run_callback(pm_callback_t cb, struct device *dev,
    474			    pm_message_t state, const char *info)
    475{
    476	ktime_t calltime;
    477	int error;
    478
    479	if (!cb)
    480		return 0;
    481
    482	calltime = initcall_debug_start(dev, cb);
    483
    484	pm_dev_dbg(dev, state, info);
    485	trace_device_pm_callback_start(dev, info, state.event);
    486	error = cb(dev);
    487	trace_device_pm_callback_end(dev, error);
    488	suspend_report_result(dev, cb, error);
    489
    490	initcall_debug_report(dev, calltime, cb, error);
    491
    492	return error;
    493}
    494
    495#ifdef CONFIG_DPM_WATCHDOG
    496struct dpm_watchdog {
    497	struct device		*dev;
    498	struct task_struct	*tsk;
    499	struct timer_list	timer;
    500};
    501
    502#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
    503	struct dpm_watchdog wd
    504
    505/**
    506 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
    507 * @t: The timer that PM watchdog depends on.
    508 *
    509 * Called when a driver has timed out suspending or resuming.
     510 * There's not much we can do here to recover, so panic() to
    511 * capture a crash-dump in pstore.
    512 */
    513static void dpm_watchdog_handler(struct timer_list *t)
    514{
    515	struct dpm_watchdog *wd = from_timer(wd, t, timer);
    516
    517	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
    518	show_stack(wd->tsk, NULL, KERN_EMERG);
    519	panic("%s %s: unrecoverable failure\n",
    520		dev_driver_string(wd->dev), dev_name(wd->dev));
    521}
    522
    523/**
    524 * dpm_watchdog_set - Enable pm watchdog for given device.
    525 * @wd: Watchdog. Must be allocated on the stack.
    526 * @dev: Device to handle.
    527 */
    528static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
    529{
    530	struct timer_list *timer = &wd->timer;
    531
    532	wd->dev = dev;
    533	wd->tsk = current;
    534
    535	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
    536	/* use same timeout value for both suspend and resume */
    537	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
    538	add_timer(timer);
    539}
    540
    541/**
    542 * dpm_watchdog_clear - Disable suspend/resume watchdog.
    543 * @wd: Watchdog to disable.
    544 */
    545static void dpm_watchdog_clear(struct dpm_watchdog *wd)
    546{
    547	struct timer_list *timer = &wd->timer;
    548
    549	del_timer_sync(timer);
    550	destroy_timer_on_stack(timer);
    551}
    552#else
    553#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
    554#define dpm_watchdog_set(x, y)
    555#define dpm_watchdog_clear(x)
    556#endif
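
/*
 * Note (an assumption about the surrounding config, not stated here): the
 * watchdog above is only built when CONFIG_DPM_WATCHDOG is enabled, and its
 * timeout comes from CONFIG_DPM_WATCHDOG_TIMEOUT in seconds.  With a value
 * of, say, 120 the timer armed in dpm_watchdog_set() expires at
 * jiffies + HZ * 120, i.e. a single callback stalling for two minutes
 * triggers the panic in dpm_watchdog_handler().
 */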
    557
    558/*------------------------- Resume routines -------------------------*/
    559
    560/**
    561 * dev_pm_skip_resume - System-wide device resume optimization check.
    562 * @dev: Target device.
    563 *
    564 * Return:
    565 * - %false if the transition under way is RESTORE.
    566 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
    567 * - The logical negation of %power.must_resume otherwise (that is, when the
    568 *   transition under way is RESUME).
    569 */
    570bool dev_pm_skip_resume(struct device *dev)
    571{
    572	if (pm_transition.event == PM_EVENT_RESTORE)
    573		return false;
    574
    575	if (pm_transition.event == PM_EVENT_THAW)
    576		return dev_pm_skip_suspend(dev);
    577
    578	return !dev->power.must_resume;
    579}
    580
    581/**
    582 * device_resume_noirq - Execute a "noirq resume" callback for given device.
    583 * @dev: Device to handle.
    584 * @state: PM transition of the system being carried out.
    585 * @async: If true, the device is being resumed asynchronously.
    586 *
    587 * The driver of @dev will not receive interrupts while this function is being
    588 * executed.
    589 */
    590static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
    591{
    592	pm_callback_t callback = NULL;
    593	const char *info = NULL;
    594	bool skip_resume;
    595	int error = 0;
    596
    597	TRACE_DEVICE(dev);
    598	TRACE_RESUME(0);
    599
    600	if (dev->power.syscore || dev->power.direct_complete)
    601		goto Out;
    602
    603	if (!dev->power.is_noirq_suspended)
    604		goto Out;
    605
    606	if (!dpm_wait_for_superior(dev, async))
    607		goto Out;
    608
    609	skip_resume = dev_pm_skip_resume(dev);
    610	/*
    611	 * If the driver callback is skipped below or by the middle layer
    612	 * callback and device_resume_early() also skips the driver callback for
    613	 * this device later, it needs to appear as "suspended" to PM-runtime,
    614	 * so change its status accordingly.
    615	 *
    616	 * Otherwise, the device is going to be resumed, so set its PM-runtime
    617	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
    618	 * to avoid confusing drivers that don't use it.
    619	 */
    620	if (skip_resume)
    621		pm_runtime_set_suspended(dev);
    622	else if (dev_pm_skip_suspend(dev))
    623		pm_runtime_set_active(dev);
    624
    625	if (dev->pm_domain) {
    626		info = "noirq power domain ";
    627		callback = pm_noirq_op(&dev->pm_domain->ops, state);
    628	} else if (dev->type && dev->type->pm) {
    629		info = "noirq type ";
    630		callback = pm_noirq_op(dev->type->pm, state);
    631	} else if (dev->class && dev->class->pm) {
    632		info = "noirq class ";
    633		callback = pm_noirq_op(dev->class->pm, state);
    634	} else if (dev->bus && dev->bus->pm) {
    635		info = "noirq bus ";
    636		callback = pm_noirq_op(dev->bus->pm, state);
    637	}
    638	if (callback)
    639		goto Run;
    640
    641	if (skip_resume)
    642		goto Skip;
    643
    644	if (dev->driver && dev->driver->pm) {
    645		info = "noirq driver ";
    646		callback = pm_noirq_op(dev->driver->pm, state);
    647	}
    648
    649Run:
    650	error = dpm_run_callback(callback, dev, state, info);
    651
    652Skip:
    653	dev->power.is_noirq_suspended = false;
    654
    655Out:
    656	complete_all(&dev->power.completion);
    657	TRACE_RESUME(error);
    658	return error;
    659}
    660
    661static bool is_async(struct device *dev)
    662{
    663	return dev->power.async_suspend && pm_async_enabled
    664		&& !pm_trace_is_enabled();
    665}
    666
    667static bool dpm_async_fn(struct device *dev, async_func_t func)
    668{
    669	reinit_completion(&dev->power.completion);
    670
    671	if (is_async(dev)) {
    672		get_device(dev);
    673		async_schedule_dev(func, dev);
    674		return true;
    675	}
    676
    677	return false;
    678}
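
/*
 * A minimal sketch of how a device ends up on the async path chosen above;
 * baz_enable_async() is a hypothetical helper, not part of this file.
 * power.async_suspend is typically set from a driver's probe path, and
 * pm_async_enabled corresponds to the global /sys/power/pm_async switch.
 */
static void baz_enable_async(struct device *dev)
{
	/* Let the PM core suspend/resume this device from an async thread. */
	device_enable_async_suspend(dev);
}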
    679
    680static void async_resume_noirq(void *data, async_cookie_t cookie)
    681{
    682	struct device *dev = (struct device *)data;
    683	int error;
    684
    685	error = device_resume_noirq(dev, pm_transition, true);
    686	if (error)
    687		pm_dev_err(dev, pm_transition, " async", error);
    688
    689	put_device(dev);
    690}
    691
    692static void dpm_noirq_resume_devices(pm_message_t state)
    693{
    694	struct device *dev;
    695	ktime_t starttime = ktime_get();
    696
    697	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
    698	mutex_lock(&dpm_list_mtx);
    699	pm_transition = state;
    700
    701	/*
     702	 * Advance the async threads upfront,
    703	 * in case the starting of async threads is
    704	 * delayed by non-async resuming devices.
    705	 */
    706	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
    707		dpm_async_fn(dev, async_resume_noirq);
    708
    709	while (!list_empty(&dpm_noirq_list)) {
    710		dev = to_device(dpm_noirq_list.next);
    711		get_device(dev);
    712		list_move_tail(&dev->power.entry, &dpm_late_early_list);
    713
    714		mutex_unlock(&dpm_list_mtx);
    715
    716		if (!is_async(dev)) {
    717			int error;
    718
    719			error = device_resume_noirq(dev, state, false);
    720			if (error) {
    721				suspend_stats.failed_resume_noirq++;
    722				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
    723				dpm_save_failed_dev(dev_name(dev));
    724				pm_dev_err(dev, state, " noirq", error);
    725			}
    726		}
    727
    728		put_device(dev);
    729
    730		mutex_lock(&dpm_list_mtx);
    731	}
    732	mutex_unlock(&dpm_list_mtx);
    733	async_synchronize_full();
    734	dpm_show_time(starttime, state, 0, "noirq");
    735	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
    736}
    737
    738/**
    739 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
    740 * @state: PM transition of the system being carried out.
    741 *
    742 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
    743 * allow device drivers' interrupt handlers to be called.
    744 */
    745void dpm_resume_noirq(pm_message_t state)
    746{
    747	dpm_noirq_resume_devices(state);
    748
    749	resume_device_irqs();
    750	device_wakeup_disarm_wake_irqs();
    751}
    752
    753/**
    754 * device_resume_early - Execute an "early resume" callback for given device.
    755 * @dev: Device to handle.
    756 * @state: PM transition of the system being carried out.
    757 * @async: If true, the device is being resumed asynchronously.
    758 *
    759 * Runtime PM is disabled for @dev while this function is being executed.
    760 */
    761static int device_resume_early(struct device *dev, pm_message_t state, bool async)
    762{
    763	pm_callback_t callback = NULL;
    764	const char *info = NULL;
    765	int error = 0;
    766
    767	TRACE_DEVICE(dev);
    768	TRACE_RESUME(0);
    769
    770	if (dev->power.syscore || dev->power.direct_complete)
    771		goto Out;
    772
    773	if (!dev->power.is_late_suspended)
    774		goto Out;
    775
    776	if (!dpm_wait_for_superior(dev, async))
    777		goto Out;
    778
    779	if (dev->pm_domain) {
    780		info = "early power domain ";
    781		callback = pm_late_early_op(&dev->pm_domain->ops, state);
    782	} else if (dev->type && dev->type->pm) {
    783		info = "early type ";
    784		callback = pm_late_early_op(dev->type->pm, state);
    785	} else if (dev->class && dev->class->pm) {
    786		info = "early class ";
    787		callback = pm_late_early_op(dev->class->pm, state);
    788	} else if (dev->bus && dev->bus->pm) {
    789		info = "early bus ";
    790		callback = pm_late_early_op(dev->bus->pm, state);
    791	}
    792	if (callback)
    793		goto Run;
    794
    795	if (dev_pm_skip_resume(dev))
    796		goto Skip;
    797
    798	if (dev->driver && dev->driver->pm) {
    799		info = "early driver ";
    800		callback = pm_late_early_op(dev->driver->pm, state);
    801	}
    802
    803Run:
    804	error = dpm_run_callback(callback, dev, state, info);
    805
    806Skip:
    807	dev->power.is_late_suspended = false;
    808
    809Out:
    810	TRACE_RESUME(error);
    811
    812	pm_runtime_enable(dev);
    813	complete_all(&dev->power.completion);
    814	return error;
    815}
    816
    817static void async_resume_early(void *data, async_cookie_t cookie)
    818{
    819	struct device *dev = (struct device *)data;
    820	int error;
    821
    822	error = device_resume_early(dev, pm_transition, true);
    823	if (error)
    824		pm_dev_err(dev, pm_transition, " async", error);
    825
    826	put_device(dev);
    827}
    828
    829/**
    830 * dpm_resume_early - Execute "early resume" callbacks for all devices.
    831 * @state: PM transition of the system being carried out.
    832 */
    833void dpm_resume_early(pm_message_t state)
    834{
    835	struct device *dev;
    836	ktime_t starttime = ktime_get();
    837
    838	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
    839	mutex_lock(&dpm_list_mtx);
    840	pm_transition = state;
    841
    842	/*
     843	 * Advance the async threads upfront,
    844	 * in case the starting of async threads is
    845	 * delayed by non-async resuming devices.
    846	 */
    847	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
    848		dpm_async_fn(dev, async_resume_early);
    849
    850	while (!list_empty(&dpm_late_early_list)) {
    851		dev = to_device(dpm_late_early_list.next);
    852		get_device(dev);
    853		list_move_tail(&dev->power.entry, &dpm_suspended_list);
    854
    855		mutex_unlock(&dpm_list_mtx);
    856
    857		if (!is_async(dev)) {
    858			int error;
    859
    860			error = device_resume_early(dev, state, false);
    861			if (error) {
    862				suspend_stats.failed_resume_early++;
    863				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
    864				dpm_save_failed_dev(dev_name(dev));
    865				pm_dev_err(dev, state, " early", error);
    866			}
    867		}
    868
    869		put_device(dev);
    870
    871		mutex_lock(&dpm_list_mtx);
    872	}
    873	mutex_unlock(&dpm_list_mtx);
    874	async_synchronize_full();
    875	dpm_show_time(starttime, state, 0, "early");
    876	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
    877}
    878
    879/**
    880 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
    881 * @state: PM transition of the system being carried out.
    882 */
    883void dpm_resume_start(pm_message_t state)
    884{
    885	dpm_resume_noirq(state);
    886	dpm_resume_early(state);
    887}
    888EXPORT_SYMBOL_GPL(dpm_resume_start);
    889
    890/**
    891 * device_resume - Execute "resume" callbacks for given device.
    892 * @dev: Device to handle.
    893 * @state: PM transition of the system being carried out.
    894 * @async: If true, the device is being resumed asynchronously.
    895 */
    896static int device_resume(struct device *dev, pm_message_t state, bool async)
    897{
    898	pm_callback_t callback = NULL;
    899	const char *info = NULL;
    900	int error = 0;
    901	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
    902
    903	TRACE_DEVICE(dev);
    904	TRACE_RESUME(0);
    905
    906	if (dev->power.syscore)
    907		goto Complete;
    908
    909	if (dev->power.direct_complete) {
    910		/* Match the pm_runtime_disable() in __device_suspend(). */
    911		pm_runtime_enable(dev);
    912		goto Complete;
    913	}
    914
    915	if (!dpm_wait_for_superior(dev, async))
    916		goto Complete;
    917
    918	dpm_watchdog_set(&wd, dev);
    919	device_lock(dev);
    920
    921	/*
    922	 * This is a fib.  But we'll allow new children to be added below
    923	 * a resumed device, even if the device hasn't been completed yet.
    924	 */
    925	dev->power.is_prepared = false;
    926
    927	if (!dev->power.is_suspended)
    928		goto Unlock;
    929
    930	if (dev->pm_domain) {
    931		info = "power domain ";
    932		callback = pm_op(&dev->pm_domain->ops, state);
    933		goto Driver;
    934	}
    935
    936	if (dev->type && dev->type->pm) {
    937		info = "type ";
    938		callback = pm_op(dev->type->pm, state);
    939		goto Driver;
    940	}
    941
    942	if (dev->class && dev->class->pm) {
    943		info = "class ";
    944		callback = pm_op(dev->class->pm, state);
    945		goto Driver;
    946	}
    947
    948	if (dev->bus) {
    949		if (dev->bus->pm) {
    950			info = "bus ";
    951			callback = pm_op(dev->bus->pm, state);
    952		} else if (dev->bus->resume) {
    953			info = "legacy bus ";
    954			callback = dev->bus->resume;
    955			goto End;
    956		}
    957	}
    958
    959 Driver:
    960	if (!callback && dev->driver && dev->driver->pm) {
    961		info = "driver ";
    962		callback = pm_op(dev->driver->pm, state);
    963	}
    964
    965 End:
    966	error = dpm_run_callback(callback, dev, state, info);
    967	dev->power.is_suspended = false;
    968
    969 Unlock:
    970	device_unlock(dev);
    971	dpm_watchdog_clear(&wd);
    972
    973 Complete:
    974	complete_all(&dev->power.completion);
    975
    976	TRACE_RESUME(error);
    977
    978	return error;
    979}
    980
    981static void async_resume(void *data, async_cookie_t cookie)
    982{
    983	struct device *dev = (struct device *)data;
    984	int error;
    985
    986	error = device_resume(dev, pm_transition, true);
    987	if (error)
    988		pm_dev_err(dev, pm_transition, " async", error);
    989	put_device(dev);
    990}
    991
    992/**
    993 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
    994 * @state: PM transition of the system being carried out.
    995 *
    996 * Execute the appropriate "resume" callback for all devices whose status
    997 * indicates that they are suspended.
    998 */
    999void dpm_resume(pm_message_t state)
   1000{
   1001	struct device *dev;
   1002	ktime_t starttime = ktime_get();
   1003
   1004	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
   1005	might_sleep();
   1006
   1007	mutex_lock(&dpm_list_mtx);
   1008	pm_transition = state;
   1009	async_error = 0;
   1010
   1011	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
   1012		dpm_async_fn(dev, async_resume);
   1013
   1014	while (!list_empty(&dpm_suspended_list)) {
   1015		dev = to_device(dpm_suspended_list.next);
   1016		get_device(dev);
   1017		if (!is_async(dev)) {
   1018			int error;
   1019
   1020			mutex_unlock(&dpm_list_mtx);
   1021
   1022			error = device_resume(dev, state, false);
   1023			if (error) {
   1024				suspend_stats.failed_resume++;
   1025				dpm_save_failed_step(SUSPEND_RESUME);
   1026				dpm_save_failed_dev(dev_name(dev));
   1027				pm_dev_err(dev, state, "", error);
   1028			}
   1029
   1030			mutex_lock(&dpm_list_mtx);
   1031		}
   1032		if (!list_empty(&dev->power.entry))
   1033			list_move_tail(&dev->power.entry, &dpm_prepared_list);
   1034
   1035		mutex_unlock(&dpm_list_mtx);
   1036
   1037		put_device(dev);
   1038
   1039		mutex_lock(&dpm_list_mtx);
   1040	}
   1041	mutex_unlock(&dpm_list_mtx);
   1042	async_synchronize_full();
   1043	dpm_show_time(starttime, state, 0, NULL);
   1044
   1045	cpufreq_resume();
   1046	devfreq_resume();
   1047	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
   1048}
   1049
   1050/**
   1051 * device_complete - Complete a PM transition for given device.
   1052 * @dev: Device to handle.
   1053 * @state: PM transition of the system being carried out.
   1054 */
   1055static void device_complete(struct device *dev, pm_message_t state)
   1056{
   1057	void (*callback)(struct device *) = NULL;
   1058	const char *info = NULL;
   1059
   1060	if (dev->power.syscore)
   1061		goto out;
   1062
   1063	device_lock(dev);
   1064
   1065	if (dev->pm_domain) {
   1066		info = "completing power domain ";
   1067		callback = dev->pm_domain->ops.complete;
   1068	} else if (dev->type && dev->type->pm) {
   1069		info = "completing type ";
   1070		callback = dev->type->pm->complete;
   1071	} else if (dev->class && dev->class->pm) {
   1072		info = "completing class ";
   1073		callback = dev->class->pm->complete;
   1074	} else if (dev->bus && dev->bus->pm) {
   1075		info = "completing bus ";
   1076		callback = dev->bus->pm->complete;
   1077	}
   1078
   1079	if (!callback && dev->driver && dev->driver->pm) {
   1080		info = "completing driver ";
   1081		callback = dev->driver->pm->complete;
   1082	}
   1083
   1084	if (callback) {
   1085		pm_dev_dbg(dev, state, info);
   1086		callback(dev);
   1087	}
   1088
   1089	device_unlock(dev);
   1090
   1091out:
   1092	pm_runtime_put(dev);
   1093}
   1094
   1095/**
   1096 * dpm_complete - Complete a PM transition for all non-sysdev devices.
   1097 * @state: PM transition of the system being carried out.
   1098 *
   1099 * Execute the ->complete() callbacks for all devices whose PM status is not
   1100 * DPM_ON (this allows new devices to be registered).
   1101 */
   1102void dpm_complete(pm_message_t state)
   1103{
   1104	struct list_head list;
   1105
   1106	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
   1107	might_sleep();
   1108
   1109	INIT_LIST_HEAD(&list);
   1110	mutex_lock(&dpm_list_mtx);
   1111	while (!list_empty(&dpm_prepared_list)) {
   1112		struct device *dev = to_device(dpm_prepared_list.prev);
   1113
   1114		get_device(dev);
   1115		dev->power.is_prepared = false;
   1116		list_move(&dev->power.entry, &list);
   1117
   1118		mutex_unlock(&dpm_list_mtx);
   1119
   1120		trace_device_pm_callback_start(dev, "", state.event);
   1121		device_complete(dev, state);
   1122		trace_device_pm_callback_end(dev, 0);
   1123
   1124		put_device(dev);
   1125
   1126		mutex_lock(&dpm_list_mtx);
   1127	}
   1128	list_splice(&list, &dpm_list);
   1129	mutex_unlock(&dpm_list_mtx);
   1130
   1131	/* Allow device probing and trigger re-probing of deferred devices */
   1132	device_unblock_probing();
   1133	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
   1134}
   1135
   1136/**
   1137 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
   1138 * @state: PM transition of the system being carried out.
   1139 *
   1140 * Execute "resume" callbacks for all devices and complete the PM transition of
   1141 * the system.
   1142 */
   1143void dpm_resume_end(pm_message_t state)
   1144{
   1145	dpm_resume(state);
   1146	dpm_complete(state);
   1147}
   1148EXPORT_SYMBOL_GPL(dpm_resume_end);
   1149
   1150
   1151/*------------------------- Suspend routines -------------------------*/
   1152
   1153/**
   1154 * resume_event - Return a "resume" message for given "suspend" sleep state.
   1155 * @sleep_state: PM message representing a sleep state.
   1156 *
   1157 * Return a PM message representing the resume event corresponding to given
   1158 * sleep state.
   1159 */
   1160static pm_message_t resume_event(pm_message_t sleep_state)
   1161{
   1162	switch (sleep_state.event) {
   1163	case PM_EVENT_SUSPEND:
   1164		return PMSG_RESUME;
   1165	case PM_EVENT_FREEZE:
   1166	case PM_EVENT_QUIESCE:
   1167		return PMSG_RECOVER;
   1168	case PM_EVENT_HIBERNATE:
   1169		return PMSG_RESTORE;
   1170	}
   1171	return PMSG_ON;
   1172}
   1173
   1174static void dpm_superior_set_must_resume(struct device *dev)
   1175{
   1176	struct device_link *link;
   1177	int idx;
   1178
   1179	if (dev->parent)
   1180		dev->parent->power.must_resume = true;
   1181
   1182	idx = device_links_read_lock();
   1183
   1184	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
   1185		link->supplier->power.must_resume = true;
   1186
   1187	device_links_read_unlock(idx);
   1188}
   1189
   1190/**
   1191 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
   1192 * @dev: Device to handle.
   1193 * @state: PM transition of the system being carried out.
   1194 * @async: If true, the device is being suspended asynchronously.
   1195 *
   1196 * The driver of @dev will not receive interrupts while this function is being
   1197 * executed.
   1198 */
   1199static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
   1200{
   1201	pm_callback_t callback = NULL;
   1202	const char *info = NULL;
   1203	int error = 0;
   1204
   1205	TRACE_DEVICE(dev);
   1206	TRACE_SUSPEND(0);
   1207
   1208	dpm_wait_for_subordinate(dev, async);
   1209
   1210	if (async_error)
   1211		goto Complete;
   1212
   1213	if (dev->power.syscore || dev->power.direct_complete)
   1214		goto Complete;
   1215
   1216	if (dev->pm_domain) {
   1217		info = "noirq power domain ";
   1218		callback = pm_noirq_op(&dev->pm_domain->ops, state);
   1219	} else if (dev->type && dev->type->pm) {
   1220		info = "noirq type ";
   1221		callback = pm_noirq_op(dev->type->pm, state);
   1222	} else if (dev->class && dev->class->pm) {
   1223		info = "noirq class ";
   1224		callback = pm_noirq_op(dev->class->pm, state);
   1225	} else if (dev->bus && dev->bus->pm) {
   1226		info = "noirq bus ";
   1227		callback = pm_noirq_op(dev->bus->pm, state);
   1228	}
   1229	if (callback)
   1230		goto Run;
   1231
   1232	if (dev_pm_skip_suspend(dev))
   1233		goto Skip;
   1234
   1235	if (dev->driver && dev->driver->pm) {
   1236		info = "noirq driver ";
   1237		callback = pm_noirq_op(dev->driver->pm, state);
   1238	}
   1239
   1240Run:
   1241	error = dpm_run_callback(callback, dev, state, info);
   1242	if (error) {
   1243		async_error = error;
   1244		goto Complete;
   1245	}
   1246
   1247Skip:
   1248	dev->power.is_noirq_suspended = true;
   1249
   1250	/*
   1251	 * Skipping the resume of devices that were in use right before the
   1252	 * system suspend (as indicated by their PM-runtime usage counters)
   1253	 * would be suboptimal.  Also resume them if doing that is not allowed
   1254	 * to be skipped.
   1255	 */
   1256	if (atomic_read(&dev->power.usage_count) > 1 ||
   1257	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
   1258	      dev->power.may_skip_resume))
   1259		dev->power.must_resume = true;
   1260
   1261	if (dev->power.must_resume)
   1262		dpm_superior_set_must_resume(dev);
   1263
   1264Complete:
   1265	complete_all(&dev->power.completion);
   1266	TRACE_SUSPEND(error);
   1267	return error;
   1268}
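
/*
 * A sketch of the driver-side opt-in behind the must_resume logic above;
 * qux_setup_pm_flags() is hypothetical and not part of this file.  A driver
 * that keeps its device runtime-suspended across system suspend can ask the
 * core to leave it suspended on resume as well.
 */
static void qux_setup_pm_flags(struct device *dev)
{
	/* Allow the "noirq" and "early" resume of this device to be skipped. */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
}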
   1269
   1270static void async_suspend_noirq(void *data, async_cookie_t cookie)
   1271{
   1272	struct device *dev = (struct device *)data;
   1273	int error;
   1274
   1275	error = __device_suspend_noirq(dev, pm_transition, true);
   1276	if (error) {
   1277		dpm_save_failed_dev(dev_name(dev));
   1278		pm_dev_err(dev, pm_transition, " async", error);
   1279	}
   1280
   1281	put_device(dev);
   1282}
   1283
   1284static int device_suspend_noirq(struct device *dev)
   1285{
   1286	if (dpm_async_fn(dev, async_suspend_noirq))
   1287		return 0;
   1288
   1289	return __device_suspend_noirq(dev, pm_transition, false);
   1290}
   1291
   1292static int dpm_noirq_suspend_devices(pm_message_t state)
   1293{
   1294	ktime_t starttime = ktime_get();
   1295	int error = 0;
   1296
   1297	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
   1298	mutex_lock(&dpm_list_mtx);
   1299	pm_transition = state;
   1300	async_error = 0;
   1301
   1302	while (!list_empty(&dpm_late_early_list)) {
   1303		struct device *dev = to_device(dpm_late_early_list.prev);
   1304
   1305		get_device(dev);
   1306		mutex_unlock(&dpm_list_mtx);
   1307
   1308		error = device_suspend_noirq(dev);
   1309
   1310		mutex_lock(&dpm_list_mtx);
   1311
   1312		if (error) {
   1313			pm_dev_err(dev, state, " noirq", error);
   1314			dpm_save_failed_dev(dev_name(dev));
   1315		} else if (!list_empty(&dev->power.entry)) {
   1316			list_move(&dev->power.entry, &dpm_noirq_list);
   1317		}
   1318
   1319		mutex_unlock(&dpm_list_mtx);
   1320
   1321		put_device(dev);
   1322
   1323		mutex_lock(&dpm_list_mtx);
   1324
   1325		if (error || async_error)
   1326			break;
   1327	}
   1328	mutex_unlock(&dpm_list_mtx);
   1329	async_synchronize_full();
   1330	if (!error)
   1331		error = async_error;
   1332
   1333	if (error) {
   1334		suspend_stats.failed_suspend_noirq++;
   1335		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
   1336	}
   1337	dpm_show_time(starttime, state, error, "noirq");
   1338	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
   1339	return error;
   1340}
   1341
   1342/**
   1343 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
   1344 * @state: PM transition of the system being carried out.
   1345 *
   1346 * Prevent device drivers' interrupt handlers from being called and invoke
   1347 * "noirq" suspend callbacks for all non-sysdev devices.
   1348 */
   1349int dpm_suspend_noirq(pm_message_t state)
   1350{
   1351	int ret;
   1352
   1353	device_wakeup_arm_wake_irqs();
   1354	suspend_device_irqs();
   1355
   1356	ret = dpm_noirq_suspend_devices(state);
   1357	if (ret)
   1358		dpm_resume_noirq(resume_event(state));
   1359
   1360	return ret;
   1361}
   1362
   1363static void dpm_propagate_wakeup_to_parent(struct device *dev)
   1364{
   1365	struct device *parent = dev->parent;
   1366
   1367	if (!parent)
   1368		return;
   1369
   1370	spin_lock_irq(&parent->power.lock);
   1371
   1372	if (device_wakeup_path(dev) && !parent->power.ignore_children)
   1373		parent->power.wakeup_path = true;
   1374
   1375	spin_unlock_irq(&parent->power.lock);
   1376}
   1377
   1378/**
   1379 * __device_suspend_late - Execute a "late suspend" callback for given device.
   1380 * @dev: Device to handle.
   1381 * @state: PM transition of the system being carried out.
   1382 * @async: If true, the device is being suspended asynchronously.
   1383 *
   1384 * Runtime PM is disabled for @dev while this function is being executed.
   1385 */
   1386static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
   1387{
   1388	pm_callback_t callback = NULL;
   1389	const char *info = NULL;
   1390	int error = 0;
   1391
   1392	TRACE_DEVICE(dev);
   1393	TRACE_SUSPEND(0);
   1394
   1395	__pm_runtime_disable(dev, false);
   1396
   1397	dpm_wait_for_subordinate(dev, async);
   1398
   1399	if (async_error)
   1400		goto Complete;
   1401
   1402	if (pm_wakeup_pending()) {
   1403		async_error = -EBUSY;
   1404		goto Complete;
   1405	}
   1406
   1407	if (dev->power.syscore || dev->power.direct_complete)
   1408		goto Complete;
   1409
   1410	if (dev->pm_domain) {
   1411		info = "late power domain ";
   1412		callback = pm_late_early_op(&dev->pm_domain->ops, state);
   1413	} else if (dev->type && dev->type->pm) {
   1414		info = "late type ";
   1415		callback = pm_late_early_op(dev->type->pm, state);
   1416	} else if (dev->class && dev->class->pm) {
   1417		info = "late class ";
   1418		callback = pm_late_early_op(dev->class->pm, state);
   1419	} else if (dev->bus && dev->bus->pm) {
   1420		info = "late bus ";
   1421		callback = pm_late_early_op(dev->bus->pm, state);
   1422	}
   1423	if (callback)
   1424		goto Run;
   1425
   1426	if (dev_pm_skip_suspend(dev))
   1427		goto Skip;
   1428
   1429	if (dev->driver && dev->driver->pm) {
   1430		info = "late driver ";
   1431		callback = pm_late_early_op(dev->driver->pm, state);
   1432	}
   1433
   1434Run:
   1435	error = dpm_run_callback(callback, dev, state, info);
   1436	if (error) {
   1437		async_error = error;
   1438		goto Complete;
   1439	}
   1440	dpm_propagate_wakeup_to_parent(dev);
   1441
   1442Skip:
   1443	dev->power.is_late_suspended = true;
   1444
   1445Complete:
   1446	TRACE_SUSPEND(error);
   1447	complete_all(&dev->power.completion);
   1448	return error;
   1449}
   1450
   1451static void async_suspend_late(void *data, async_cookie_t cookie)
   1452{
   1453	struct device *dev = (struct device *)data;
   1454	int error;
   1455
   1456	error = __device_suspend_late(dev, pm_transition, true);
   1457	if (error) {
   1458		dpm_save_failed_dev(dev_name(dev));
   1459		pm_dev_err(dev, pm_transition, " async", error);
   1460	}
   1461	put_device(dev);
   1462}
   1463
   1464static int device_suspend_late(struct device *dev)
   1465{
   1466	if (dpm_async_fn(dev, async_suspend_late))
   1467		return 0;
   1468
   1469	return __device_suspend_late(dev, pm_transition, false);
   1470}
   1471
   1472/**
   1473 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
   1474 * @state: PM transition of the system being carried out.
   1475 */
   1476int dpm_suspend_late(pm_message_t state)
   1477{
   1478	ktime_t starttime = ktime_get();
   1479	int error = 0;
   1480
   1481	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
   1482	wake_up_all_idle_cpus();
   1483	mutex_lock(&dpm_list_mtx);
   1484	pm_transition = state;
   1485	async_error = 0;
   1486
   1487	while (!list_empty(&dpm_suspended_list)) {
   1488		struct device *dev = to_device(dpm_suspended_list.prev);
   1489
   1490		get_device(dev);
   1491
   1492		mutex_unlock(&dpm_list_mtx);
   1493
   1494		error = device_suspend_late(dev);
   1495
   1496		mutex_lock(&dpm_list_mtx);
   1497
   1498		if (!list_empty(&dev->power.entry))
   1499			list_move(&dev->power.entry, &dpm_late_early_list);
   1500
   1501		if (error) {
   1502			pm_dev_err(dev, state, " late", error);
   1503			dpm_save_failed_dev(dev_name(dev));
   1504		}
   1505
   1506		mutex_unlock(&dpm_list_mtx);
   1507
   1508		put_device(dev);
   1509
   1510		mutex_lock(&dpm_list_mtx);
   1511
   1512		if (error || async_error)
   1513			break;
   1514	}
   1515	mutex_unlock(&dpm_list_mtx);
   1516	async_synchronize_full();
   1517	if (!error)
   1518		error = async_error;
   1519	if (error) {
   1520		suspend_stats.failed_suspend_late++;
   1521		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
   1522		dpm_resume_early(resume_event(state));
   1523	}
   1524	dpm_show_time(starttime, state, error, "late");
   1525	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
   1526	return error;
   1527}
   1528
   1529/**
   1530 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
   1531 * @state: PM transition of the system being carried out.
   1532 */
   1533int dpm_suspend_end(pm_message_t state)
   1534{
   1535	ktime_t starttime = ktime_get();
   1536	int error;
   1537
   1538	error = dpm_suspend_late(state);
   1539	if (error)
   1540		goto out;
   1541
   1542	error = dpm_suspend_noirq(state);
   1543	if (error)
   1544		dpm_resume_early(resume_event(state));
   1545
   1546out:
   1547	dpm_show_time(starttime, state, error, "end");
   1548	return error;
   1549}
   1550EXPORT_SYMBOL_GPL(dpm_suspend_end);
   1551
   1552/**
   1553 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
   1554 * @dev: Device to suspend.
   1555 * @state: PM transition of the system being carried out.
   1556 * @cb: Suspend callback to execute.
   1557 * @info: string description of caller.
   1558 */
   1559static int legacy_suspend(struct device *dev, pm_message_t state,
   1560			  int (*cb)(struct device *dev, pm_message_t state),
   1561			  const char *info)
   1562{
   1563	int error;
   1564	ktime_t calltime;
   1565
   1566	calltime = initcall_debug_start(dev, cb);
   1567
   1568	trace_device_pm_callback_start(dev, info, state.event);
   1569	error = cb(dev, state);
   1570	trace_device_pm_callback_end(dev, error);
   1571	suspend_report_result(dev, cb, error);
   1572
   1573	initcall_debug_report(dev, calltime, cb, error);
   1574
   1575	return error;
   1576}
   1577
   1578static void dpm_clear_superiors_direct_complete(struct device *dev)
   1579{
   1580	struct device_link *link;
   1581	int idx;
   1582
   1583	if (dev->parent) {
   1584		spin_lock_irq(&dev->parent->power.lock);
   1585		dev->parent->power.direct_complete = false;
   1586		spin_unlock_irq(&dev->parent->power.lock);
   1587	}
   1588
   1589	idx = device_links_read_lock();
   1590
   1591	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
   1592		spin_lock_irq(&link->supplier->power.lock);
   1593		link->supplier->power.direct_complete = false;
   1594		spin_unlock_irq(&link->supplier->power.lock);
   1595	}
   1596
   1597	device_links_read_unlock(idx);
   1598}
   1599
   1600/**
   1601 * __device_suspend - Execute "suspend" callbacks for given device.
   1602 * @dev: Device to handle.
   1603 * @state: PM transition of the system being carried out.
   1604 * @async: If true, the device is being suspended asynchronously.
   1605 */
   1606static int __device_suspend(struct device *dev, pm_message_t state, bool async)
   1607{
   1608	pm_callback_t callback = NULL;
   1609	const char *info = NULL;
   1610	int error = 0;
   1611	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
   1612
   1613	TRACE_DEVICE(dev);
   1614	TRACE_SUSPEND(0);
   1615
   1616	dpm_wait_for_subordinate(dev, async);
   1617
   1618	if (async_error) {
   1619		dev->power.direct_complete = false;
   1620		goto Complete;
   1621	}
   1622
   1623	/*
   1624	 * Wait for possible runtime PM transitions of the device in progress
   1625	 * to complete and if there's a runtime resume request pending for it,
   1626	 * resume it before proceeding with invoking the system-wide suspend
   1627	 * callbacks for it.
   1628	 *
   1629	 * If the system-wide suspend callbacks below change the configuration
   1630	 * of the device, they must disable runtime PM for it or otherwise
   1631	 * ensure that its runtime-resume callbacks will not be confused by that
   1632	 * change in case they are invoked going forward.
   1633	 */
   1634	pm_runtime_barrier(dev);
   1635
   1636	if (pm_wakeup_pending()) {
   1637		dev->power.direct_complete = false;
   1638		async_error = -EBUSY;
   1639		goto Complete;
   1640	}
   1641
   1642	if (dev->power.syscore)
   1643		goto Complete;
   1644
   1645	/* Avoid direct_complete to let wakeup_path propagate. */
   1646	if (device_may_wakeup(dev) || device_wakeup_path(dev))
   1647		dev->power.direct_complete = false;
   1648
   1649	if (dev->power.direct_complete) {
   1650		if (pm_runtime_status_suspended(dev)) {
   1651			pm_runtime_disable(dev);
   1652			if (pm_runtime_status_suspended(dev)) {
   1653				pm_dev_dbg(dev, state, "direct-complete ");
   1654				goto Complete;
   1655			}
   1656
   1657			pm_runtime_enable(dev);
   1658		}
   1659		dev->power.direct_complete = false;
   1660	}
   1661
   1662	dev->power.may_skip_resume = true;
   1663	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
   1664
   1665	dpm_watchdog_set(&wd, dev);
   1666	device_lock(dev);
   1667
   1668	if (dev->pm_domain) {
   1669		info = "power domain ";
   1670		callback = pm_op(&dev->pm_domain->ops, state);
   1671		goto Run;
   1672	}
   1673
   1674	if (dev->type && dev->type->pm) {
   1675		info = "type ";
   1676		callback = pm_op(dev->type->pm, state);
   1677		goto Run;
   1678	}
   1679
   1680	if (dev->class && dev->class->pm) {
   1681		info = "class ";
   1682		callback = pm_op(dev->class->pm, state);
   1683		goto Run;
   1684	}
   1685
   1686	if (dev->bus) {
   1687		if (dev->bus->pm) {
   1688			info = "bus ";
   1689			callback = pm_op(dev->bus->pm, state);
   1690		} else if (dev->bus->suspend) {
   1691			pm_dev_dbg(dev, state, "legacy bus ");
   1692			error = legacy_suspend(dev, state, dev->bus->suspend,
   1693						"legacy bus ");
   1694			goto End;
   1695		}
   1696	}
   1697
   1698 Run:
   1699	if (!callback && dev->driver && dev->driver->pm) {
   1700		info = "driver ";
   1701		callback = pm_op(dev->driver->pm, state);
   1702	}
   1703
   1704	error = dpm_run_callback(callback, dev, state, info);
   1705
   1706 End:
   1707	if (!error) {
   1708		dev->power.is_suspended = true;
   1709		if (device_may_wakeup(dev))
   1710			dev->power.wakeup_path = true;
   1711
   1712		dpm_propagate_wakeup_to_parent(dev);
   1713		dpm_clear_superiors_direct_complete(dev);
   1714	}
   1715
   1716	device_unlock(dev);
   1717	dpm_watchdog_clear(&wd);
   1718
   1719 Complete:
   1720	if (error)
   1721		async_error = error;
   1722
   1723	complete_all(&dev->power.completion);
   1724	TRACE_SUSPEND(error);
   1725	return error;
   1726}
   1727
   1728static void async_suspend(void *data, async_cookie_t cookie)
   1729{
   1730	struct device *dev = (struct device *)data;
   1731	int error;
   1732
   1733	error = __device_suspend(dev, pm_transition, true);
   1734	if (error) {
   1735		dpm_save_failed_dev(dev_name(dev));
   1736		pm_dev_err(dev, pm_transition, " async", error);
   1737	}
   1738
   1739	put_device(dev);
   1740}
   1741
   1742static int device_suspend(struct device *dev)
   1743{
   1744	if (dpm_async_fn(dev, async_suspend))
   1745		return 0;
   1746
   1747	return __device_suspend(dev, pm_transition, false);
   1748}
   1749
   1750/**
   1751 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
   1752 * @state: PM transition of the system being carried out.
   1753 */
   1754int dpm_suspend(pm_message_t state)
   1755{
   1756	ktime_t starttime = ktime_get();
   1757	int error = 0;
   1758
   1759	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
   1760	might_sleep();
   1761
   1762	devfreq_suspend();
   1763	cpufreq_suspend();
   1764
   1765	mutex_lock(&dpm_list_mtx);
   1766	pm_transition = state;
   1767	async_error = 0;
   1768	while (!list_empty(&dpm_prepared_list)) {
   1769		struct device *dev = to_device(dpm_prepared_list.prev);
   1770
   1771		get_device(dev);
   1772
   1773		mutex_unlock(&dpm_list_mtx);
   1774
   1775		error = device_suspend(dev);
   1776
   1777		mutex_lock(&dpm_list_mtx);
   1778
   1779		if (error) {
   1780			pm_dev_err(dev, state, "", error);
   1781			dpm_save_failed_dev(dev_name(dev));
   1782		} else if (!list_empty(&dev->power.entry)) {
   1783			list_move(&dev->power.entry, &dpm_suspended_list);
   1784		}
   1785
   1786		mutex_unlock(&dpm_list_mtx);
   1787
   1788		put_device(dev);
   1789
   1790		mutex_lock(&dpm_list_mtx);
   1791
   1792		if (error || async_error)
   1793			break;
   1794	}
   1795	mutex_unlock(&dpm_list_mtx);
   1796	async_synchronize_full();
   1797	if (!error)
   1798		error = async_error;
   1799	if (error) {
   1800		suspend_stats.failed_suspend++;
   1801		dpm_save_failed_step(SUSPEND_SUSPEND);
   1802	}
   1803	dpm_show_time(starttime, state, error, NULL);
   1804	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
   1805	return error;
   1806}
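/*
 * Note on ordering: the loop above always takes dpm_prepared_list.prev,
 * i.e. the most recently prepared device, so devices are suspended in
 * reverse order of registration -- children before their parents -- per
 * the depth-first layout of dpm_list described at the top of this file.
 */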
   1807
   1808/**
   1809 * device_prepare - Prepare a device for system power transition.
   1810 * @dev: Device to handle.
   1811 * @state: PM transition of the system being carried out.
   1812 *
    1813	 * Execute the ->prepare() callback(s) for the given device.  No new children of the
   1814 * device may be registered after this function has returned.
   1815 */
   1816static int device_prepare(struct device *dev, pm_message_t state)
   1817{
   1818	int (*callback)(struct device *) = NULL;
   1819	int ret = 0;
   1820
   1821	/*
   1822	 * If a device's parent goes into runtime suspend at the wrong time,
   1823	 * it won't be possible to resume the device.  To prevent this we
   1824	 * block runtime suspend here, during the prepare phase, and allow
   1825	 * it again during the complete phase.
   1826	 */
   1827	pm_runtime_get_noresume(dev);
   1828
   1829	if (dev->power.syscore)
   1830		return 0;
   1831
   1832	device_lock(dev);
   1833
   1834	dev->power.wakeup_path = false;
   1835
   1836	if (dev->power.no_pm_callbacks)
   1837		goto unlock;
   1838
   1839	if (dev->pm_domain)
   1840		callback = dev->pm_domain->ops.prepare;
   1841	else if (dev->type && dev->type->pm)
   1842		callback = dev->type->pm->prepare;
   1843	else if (dev->class && dev->class->pm)
   1844		callback = dev->class->pm->prepare;
   1845	else if (dev->bus && dev->bus->pm)
   1846		callback = dev->bus->pm->prepare;
   1847
   1848	if (!callback && dev->driver && dev->driver->pm)
   1849		callback = dev->driver->pm->prepare;
   1850
   1851	if (callback)
   1852		ret = callback(dev);
   1853
   1854unlock:
   1855	device_unlock(dev);
   1856
   1857	if (ret < 0) {
   1858		suspend_report_result(dev, callback, ret);
   1859		pm_runtime_put(dev);
   1860		return ret;
   1861	}
   1862	/*
   1863	 * A positive return value from ->prepare() means "this device appears
   1864	 * to be runtime-suspended and its state is fine, so if it really is
   1865	 * runtime-suspended, you can leave it in that state provided that you
   1866	 * will do the same thing with all of its descendants".  This only
   1867	 * applies to suspend transitions, however.
   1868	 */
   1869	spin_lock_irq(&dev->power.lock);
   1870	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
   1871		(ret > 0 || dev->power.no_pm_callbacks) &&
   1872		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
   1873	spin_unlock_irq(&dev->power.lock);
   1874	return 0;
   1875}
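/*
 * Hypothetical sketch (not part of this file) of a driver opting in to the
 * direct-complete optimization described above: a ->prepare() callback may
 * return a positive value when the device is already runtime-suspended and
 * needs nothing further done for system suspend, for example:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * device_prepare() then sets power.direct_complete for a PM_EVENT_SUSPEND
 * transition, unless the driver has set DPM_FLAG_NO_DIRECT_COMPLETE.
 */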
   1876
   1877/**
   1878 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
   1879 * @state: PM transition of the system being carried out.
   1880 *
   1881 * Execute the ->prepare() callback(s) for all devices.
   1882 */
   1883int dpm_prepare(pm_message_t state)
   1884{
   1885	int error = 0;
   1886
   1887	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
   1888	might_sleep();
   1889
   1890	/*
    1891	 * Give the known devices a chance to complete their probes before we
    1892	 * disable probing of devices. This sync point is important at least
    1893	 * at boot time and during hibernation restore.
   1894	 */
   1895	wait_for_device_probe();
   1896	/*
    1897	 * It is unsafe for device probing to happen during suspend or
    1898	 * hibernation, since system behavior would be unpredictable in that
    1899	 * case. So prohibit device probing here and defer the probes
    1900	 * instead. The normal behavior will be restored in dpm_complete().
   1901	 */
   1902	device_block_probing();
   1903
   1904	mutex_lock(&dpm_list_mtx);
   1905	while (!list_empty(&dpm_list) && !error) {
   1906		struct device *dev = to_device(dpm_list.next);
   1907
   1908		get_device(dev);
   1909
   1910		mutex_unlock(&dpm_list_mtx);
   1911
   1912		trace_device_pm_callback_start(dev, "", state.event);
   1913		error = device_prepare(dev, state);
   1914		trace_device_pm_callback_end(dev, error);
   1915
   1916		mutex_lock(&dpm_list_mtx);
   1917
   1918		if (!error) {
   1919			dev->power.is_prepared = true;
   1920			if (!list_empty(&dev->power.entry))
   1921				list_move_tail(&dev->power.entry, &dpm_prepared_list);
   1922		} else if (error == -EAGAIN) {
   1923			error = 0;
   1924		} else {
   1925			dev_info(dev, "not prepared for power transition: code %d\n",
   1926				 error);
   1927		}
   1928
   1929		mutex_unlock(&dpm_list_mtx);
   1930
   1931		put_device(dev);
   1932
   1933		mutex_lock(&dpm_list_mtx);
   1934	}
   1935	mutex_unlock(&dpm_list_mtx);
   1936	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
   1937	return error;
   1938}
   1939
   1940/**
   1941 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
   1942 * @state: PM transition of the system being carried out.
   1943 *
    1944	 * Prepare all non-sysdev devices for the system PM transition and execute "suspend"
   1945 * callbacks for them.
   1946 */
   1947int dpm_suspend_start(pm_message_t state)
   1948{
   1949	ktime_t starttime = ktime_get();
   1950	int error;
   1951
   1952	error = dpm_prepare(state);
   1953	if (error) {
   1954		suspend_stats.failed_prepare++;
   1955		dpm_save_failed_step(SUSPEND_PREPARE);
   1956	} else
   1957		error = dpm_suspend(state);
   1958	dpm_show_time(starttime, state, error, "start");
   1959	return error;
   1960}
   1961EXPORT_SYMBOL_GPL(dpm_suspend_start);
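/*
 * dpm_suspend_start() is the first half of the device side of a system
 * sleep transition; the suspend and hibernation cores call it (e.g. with
 * PMSG_SUSPEND) before driving the late and noirq phases separately via
 * dpm_suspend_end().  A failure here is normally unwound with
 * dpm_resume_end().
 */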
   1962
   1963void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
   1964{
   1965	if (ret)
   1966		dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
   1967}
   1968EXPORT_SYMBOL_GPL(__suspend_report_result);
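/*
 * Callers do not normally use __suspend_report_result() directly; they go
 * through the suspend_report_result() wrapper (as device_prepare() does
 * above), which supplies __func__ as the "function" argument so the error
 * message names both the caller and the failing callback.
 */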
   1969
   1970/**
   1971 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
   1972 * @subordinate: Device that needs to wait for @dev.
   1973 * @dev: Device to wait for.
   1974 */
   1975int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
   1976{
   1977	dpm_wait(dev, subordinate->power.async_suspend);
   1978	return async_error;
   1979}
   1980EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
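/*
 * Hypothetical usage sketch (the names below are illustrative only): a
 * driver whose suspend must not begin before some other device has finished
 * its own suspend can wait for it from its callback:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		device_pm_wait_for_dev(dev, foo->companion);
 *		return foo_stop_hw(foo);
 *	}
 *
 * The return value forwards async_error, so a failure in the awaited
 * device's asynchronous suspend is visible to the caller.
 */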
   1981
   1982/**
   1983 * dpm_for_each_dev - device iterator.
   1984 * @data: data for the callback.
   1985 * @fn: function to be called for each device.
   1986 *
   1987 * Iterate over devices in dpm_list, and call @fn for each device,
   1988 * passing it @data.
   1989 */
   1990void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
   1991{
   1992	struct device *dev;
   1993
   1994	if (!fn)
   1995		return;
   1996
   1997	device_pm_lock();
   1998	list_for_each_entry(dev, &dpm_list, power.entry)
   1999		fn(dev, data);
   2000	device_pm_unlock();
   2001}
   2002EXPORT_SYMBOL_GPL(dpm_for_each_dev);
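/*
 * Minimal usage sketch (the callback name is illustrative): walk every
 * device on dpm_list, e.g. to dump them for debugging:
 *
 *	static void pm_print_dev(struct device *dev, void *data)
 *	{
 *		dev_info(dev, "present on dpm_list\n");
 *	}
 *
 *	dpm_for_each_dev(NULL, pm_print_dev);
 *
 * The walk runs with device_pm_lock() held (i.e. under dpm_list_mtx), so
 * @fn must not do anything that takes that lock again, such as registering
 * or unregistering devices.
 */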
   2003
   2004static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
   2005{
   2006	if (!ops)
   2007		return true;
   2008
   2009	return !ops->prepare &&
   2010	       !ops->suspend &&
   2011	       !ops->suspend_late &&
   2012	       !ops->suspend_noirq &&
   2013	       !ops->resume_noirq &&
   2014	       !ops->resume_early &&
   2015	       !ops->resume &&
   2016	       !ops->complete;
   2017}
   2018
   2019void device_pm_check_callbacks(struct device *dev)
   2020{
   2021	unsigned long flags;
   2022
   2023	spin_lock_irqsave(&dev->power.lock, flags);
   2024	dev->power.no_pm_callbacks =
   2025		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
   2026		 !dev->bus->suspend && !dev->bus->resume)) &&
   2027		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
   2028		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
   2029		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
   2030		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
   2031		 !dev->driver->suspend && !dev->driver->resume));
   2032	spin_unlock_irqrestore(&dev->power.lock, flags);
   2033}
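/*
 * device_pm_check_callbacks() caches the "no PM callbacks at any level"
 * result in power.no_pm_callbacks so that device_prepare() above (and the
 * individual suspend/resume phases) can skip the callback lookups and, in
 * combination with ->prepare() semantics, take the direct-complete path
 * without re-walking bus, class, type, PM domain and driver ops each time.
 */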
   2034
   2035bool dev_pm_skip_suspend(struct device *dev)
   2036{
   2037	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
   2038		pm_runtime_status_suspended(dev);
   2039}
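/*
 * dev_pm_skip_suspend() is meant for bus types and PM domains that honor
 * DPM_FLAG_SMART_SUSPEND: if the driver has set that flag and the device is
 * already runtime-suspended, the late/noirq suspend callbacks may be
 * skipped, since runtime PM has already left the device in a suitable
 * low-power state.  The PCI bus type and the ACPI PM domain, for instance,
 * use it this way.
 */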