cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dd.c (35416B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * drivers/base/dd.c - The core device/driver interactions.
      4 *
      5 * This file contains the (sometimes tricky) code that controls the
      6 * interactions between devices and drivers, which primarily includes
      7 * driver binding and unbinding.
      8 *
      9 * All of this code used to exist in drivers/base/bus.c, but was
      10 * relocated here in the name of compartmentalization (since it wasn't
      11 * strictly code just for the 'struct bus_type').
     12 *
     13 * Copyright (c) 2002-5 Patrick Mochel
     14 * Copyright (c) 2002-3 Open Source Development Labs
     15 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
     16 * Copyright (c) 2007-2009 Novell Inc.
     17 */
     18
     19#include <linux/debugfs.h>
     20#include <linux/device.h>
     21#include <linux/delay.h>
     22#include <linux/dma-map-ops.h>
     23#include <linux/init.h>
     24#include <linux/module.h>
     25#include <linux/kthread.h>
     26#include <linux/wait.h>
     27#include <linux/async.h>
     28#include <linux/pm_runtime.h>
     29#include <linux/pinctrl/devinfo.h>
     30#include <linux/slab.h>
     31
     32#include "base.h"
     33#include "power/power.h"
     34
     35/*
     36 * Deferred Probe infrastructure.
     37 *
     38 * Sometimes driver probe order matters, but the kernel doesn't always have
      39 * dependency information, which means some drivers will get probed before a
      40 * resource they depend on is available.  For example, an SDHCI driver may
     41 * first need a GPIO line from an i2c GPIO controller before it can be
     42 * initialized.  If a required resource is not available yet, a driver can
      43 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook.
     44 *
     45 * Deferred probe maintains two lists of devices, a pending list and an active
     46 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
     47 * pending list.  A successful driver probe will trigger moving all devices
     48 * from the pending to the active list so that the workqueue will eventually
     49 * retry them.
     50 *
     51 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
      52 * or the (struct device*)->p->deferred_probe pointers are manipulated.
     53 */
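
Editor's illustration (not part of dd.c): a minimal sketch of how a driver participates in the scheme described above, assuming a hypothetical platform driver "foo" whose clock provider may probe later. Returning -EPROBE_DEFER from probe() is what lands the device on the pending list; driver_deferred_probe_trigger() below then decides when it is retried.

/* Illustrative sketch only -- not part of dd.c. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * devm_clk_get() may return ERR_PTR(-EPROBE_DEFER) if the clock
	 * provider has not been probed yet; propagating that value makes
	 * the driver core queue this device for a later retry.
	 */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= { .name = "foo" },	/* "foo" is a made-up name */
};
module_platform_driver(foo_driver);
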
     54static DEFINE_MUTEX(deferred_probe_mutex);
     55static LIST_HEAD(deferred_probe_pending_list);
     56static LIST_HEAD(deferred_probe_active_list);
     57static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
     58static bool initcalls_done;
     59
     60/* Save the async probe drivers' name from kernel cmdline */
     61#define ASYNC_DRV_NAMES_MAX_LEN	256
     62static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
     63static bool async_probe_default;
     64
     65/*
      66 * In some cases, like suspend to RAM or hibernation, it might be reasonable
      67 * to prohibit probing of devices as it could be unsafe.
      68 * Once defer_all_probes is true, all driver probes will be forcibly deferred.
     69 */
     70static bool defer_all_probes;
     71
     72static void __device_set_deferred_probe_reason(const struct device *dev, char *reason)
     73{
     74	kfree(dev->p->deferred_probe_reason);
     75	dev->p->deferred_probe_reason = reason;
     76}
     77
     78/*
     79 * deferred_probe_work_func() - Retry probing devices in the active list.
     80 */
     81static void deferred_probe_work_func(struct work_struct *work)
     82{
     83	struct device *dev;
     84	struct device_private *private;
     85	/*
     86	 * This block processes every device in the deferred 'active' list.
     87	 * Each device is removed from the active list and passed to
     88	 * bus_probe_device() to re-attempt the probe.  The loop continues
     89	 * until every device in the active list is removed and retried.
     90	 *
     91	 * Note: Once the device is removed from the list and the mutex is
      92	 * released, it is possible for the device to get freed by another thread
      93	 * and cause an illegal pointer dereference.  This code uses
     94	 * get/put_device() to ensure the device structure cannot disappear
     95	 * from under our feet.
     96	 */
     97	mutex_lock(&deferred_probe_mutex);
     98	while (!list_empty(&deferred_probe_active_list)) {
     99		private = list_first_entry(&deferred_probe_active_list,
    100					typeof(*dev->p), deferred_probe);
    101		dev = private->device;
    102		list_del_init(&private->deferred_probe);
    103
    104		get_device(dev);
    105
    106		__device_set_deferred_probe_reason(dev, NULL);
    107
    108		/*
    109		 * Drop the mutex while probing each device; the probe path may
    110		 * manipulate the deferred list
    111		 */
    112		mutex_unlock(&deferred_probe_mutex);
    113
    114		/*
    115		 * Force the device to the end of the dpm_list since
    116		 * the PM code assumes that the order we add things to
    117		 * the list is a good order for suspend but deferred
    118		 * probe makes that very unsafe.
    119		 */
    120		device_pm_move_to_tail(dev);
    121
    122		dev_dbg(dev, "Retrying from deferred list\n");
    123		bus_probe_device(dev);
    124		mutex_lock(&deferred_probe_mutex);
    125
    126		put_device(dev);
    127	}
    128	mutex_unlock(&deferred_probe_mutex);
    129}
    130static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
    131
    132void driver_deferred_probe_add(struct device *dev)
    133{
    134	if (!dev->can_match)
    135		return;
    136
    137	mutex_lock(&deferred_probe_mutex);
    138	if (list_empty(&dev->p->deferred_probe)) {
    139		dev_dbg(dev, "Added to deferred list\n");
    140		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
    141	}
    142	mutex_unlock(&deferred_probe_mutex);
    143}
    144
    145void driver_deferred_probe_del(struct device *dev)
    146{
    147	mutex_lock(&deferred_probe_mutex);
    148	if (!list_empty(&dev->p->deferred_probe)) {
    149		dev_dbg(dev, "Removed from deferred list\n");
    150		list_del_init(&dev->p->deferred_probe);
    151		__device_set_deferred_probe_reason(dev, NULL);
    152	}
    153	mutex_unlock(&deferred_probe_mutex);
    154}
    155
    156static bool driver_deferred_probe_enable;
    157/**
    158 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
    159 *
     160 * This function moves all devices from the pending list to the active
    161 * list and schedules the deferred probe workqueue to process them.  It
    162 * should be called anytime a driver is successfully bound to a device.
    163 *
    164 * Note, there is a race condition in multi-threaded probe. In the case where
    165 * more than one device is probing at the same time, it is possible for one
    166 * probe to complete successfully while another is about to defer. If the second
    167 * depends on the first, then it will get put on the pending list after the
    168 * trigger event has already occurred and will be stuck there.
    169 *
    170 * The atomic 'deferred_trigger_count' is used to determine if a successful
    171 * trigger has occurred in the midst of probing a driver. If the trigger count
    172 * changes in the midst of a probe, then deferred processing should be triggered
    173 * again.
    174 */
    175static void driver_deferred_probe_trigger(void)
    176{
    177	if (!driver_deferred_probe_enable)
    178		return;
    179
    180	/*
    181	 * A successful probe means that all the devices in the pending list
    182	 * should be triggered to be reprobed.  Move all the deferred devices
    183	 * into the active list so they can be retried by the workqueue
    184	 */
    185	mutex_lock(&deferred_probe_mutex);
    186	atomic_inc(&deferred_trigger_count);
    187	list_splice_tail_init(&deferred_probe_pending_list,
    188			      &deferred_probe_active_list);
    189	mutex_unlock(&deferred_probe_mutex);
    190
    191	/*
    192	 * Kick the re-probe thread.  It may already be scheduled, but it is
    193	 * safe to kick it again.
    194	 */
    195	queue_work(system_unbound_wq, &deferred_probe_work);
    196}
    197
    198/**
     199 * device_block_probing() - Block/defer devices' probes
    200 *
    201 *	It will disable probing of devices and defer their probes instead.
    202 */
    203void device_block_probing(void)
    204{
    205	defer_all_probes = true;
    206	/* sync with probes to avoid races. */
    207	wait_for_device_probe();
    208}
    209
    210/**
     211 * device_unblock_probing() - Unblock/enable devices' probes
    212 *
    213 *	It will restore normal behavior and trigger re-probing of deferred
    214 * devices.
    215 */
    216void device_unblock_probing(void)
    217{
    218	defer_all_probes = false;
    219	driver_deferred_probe_trigger();
    220}
    221
    222/**
    223 * device_set_deferred_probe_reason() - Set defer probe reason message for device
    224 * @dev: the pointer to the struct device
    225 * @vaf: the pointer to va_format structure with message
    226 */
    227void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
    228{
    229	const char *drv = dev_driver_string(dev);
    230	char *reason;
    231
    232	mutex_lock(&deferred_probe_mutex);
    233
    234	reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
    235	__device_set_deferred_probe_reason(dev, reason);
    236
    237	mutex_unlock(&deferred_probe_mutex);
    238}
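
Editor's note (not part of dd.c): drivers normally do not call device_set_deferred_probe_reason() themselves; dev_err_probe() from drivers/base/core.c is the usual front end, which records the message as the deferral reason when the error is -EPROBE_DEFER and logs it otherwise. A minimal sketch, assuming a hypothetical driver that needs a "reset" GPIO:

/* Illustrative sketch only -- not part of dd.c. */
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		/*
		 * On -EPROBE_DEFER this records the message as the deferral
		 * reason; otherwise it logs an error.  Either way the error
		 * code is passed back to the caller.
		 */
		return dev_err_probe(&pdev->dev, PTR_ERR(reset),
				     "failed to get reset GPIO\n");

	return 0;
}

The recorded reason is what appears next to the device name in /sys/kernel/debug/devices_deferred (see deferred_devs_show() below and the debugfs file created in deferred_probe_initcall()).
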
    239
    240/*
    241 * deferred_devs_show() - Show the devices in the deferred probe pending list.
    242 */
    243static int deferred_devs_show(struct seq_file *s, void *data)
    244{
    245	struct device_private *curr;
    246
    247	mutex_lock(&deferred_probe_mutex);
    248
    249	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
    250		seq_printf(s, "%s\t%s", dev_name(curr->device),
    251			   curr->device->p->deferred_probe_reason ?: "\n");
    252
    253	mutex_unlock(&deferred_probe_mutex);
    254
    255	return 0;
    256}
    257DEFINE_SHOW_ATTRIBUTE(deferred_devs);
    258
    259int driver_deferred_probe_timeout;
    260EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
    261
    262static int __init deferred_probe_timeout_setup(char *str)
    263{
    264	int timeout;
    265
    266	if (!kstrtoint(str, 10, &timeout))
    267		driver_deferred_probe_timeout = timeout;
    268	return 1;
    269}
    270__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
    271
    272/**
    273 * driver_deferred_probe_check_state() - Check deferred probe state
    274 * @dev: device to check
    275 *
    276 * Return:
    277 * * -ENODEV if initcalls have completed and modules are disabled.
    278 * * -ETIMEDOUT if the deferred probe timeout was set and has expired
    279 *   and modules are enabled.
    280 * * -EPROBE_DEFER in other cases.
    281 *
    282 * Drivers or subsystems can opt-in to calling this function instead of directly
    283 * returning -EPROBE_DEFER.
    284 */
    285int driver_deferred_probe_check_state(struct device *dev)
    286{
    287	if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
    288		dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
    289		return -ENODEV;
    290	}
    291
    292	if (!driver_deferred_probe_timeout && initcalls_done) {
    293		dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
    294		return -ETIMEDOUT;
    295	}
    296
    297	return -EPROBE_DEFER;
    298}
    299EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
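
Editor's illustration (not part of dd.c): how a caller might opt in to driver_deferred_probe_check_state() instead of returning -EPROBE_DEFER unconditionally. The helper and its supplier_ready flag are hypothetical; only the call into the function above is real.

/* Illustrative sketch only -- not part of dd.c. */
#include <linux/device.h>

static int foo_attach_supplier(struct device *dev, bool supplier_ready)
{
	if (!supplier_ready)
		/*
		 * -EPROBE_DEFER while the supplier may still appear,
		 * -ENODEV or -ETIMEDOUT once the core has decided it
		 * never will (see the Return section above).
		 */
		return driver_deferred_probe_check_state(dev);

	return 0;
}
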
    300
    301static void deferred_probe_timeout_work_func(struct work_struct *work)
    302{
    303	struct device_private *p;
    304
    305	fw_devlink_drivers_done();
    306
    307	driver_deferred_probe_timeout = 0;
    308	driver_deferred_probe_trigger();
    309	flush_work(&deferred_probe_work);
    310
    311	mutex_lock(&deferred_probe_mutex);
    312	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
    313		dev_info(p->device, "deferred probe pending\n");
    314	mutex_unlock(&deferred_probe_mutex);
    315}
    316static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
    317
    318void deferred_probe_extend_timeout(void)
    319{
    320	/*
    321	 * If the work hasn't been queued yet or if the work expired, don't
    322	 * start a new one.
    323	 */
    324	if (cancel_delayed_work(&deferred_probe_timeout_work)) {
    325		schedule_delayed_work(&deferred_probe_timeout_work,
    326				driver_deferred_probe_timeout * HZ);
    327		pr_debug("Extended deferred probe timeout by %d secs\n",
    328					driver_deferred_probe_timeout);
    329	}
    330}
    331
    332/**
    333 * deferred_probe_initcall() - Enable probing of deferred devices
    334 *
    335 * We don't want to get in the way when the bulk of drivers are getting probed.
    336 * Instead, this initcall makes sure that deferred probing is delayed until
    337 * late_initcall time.
    338 */
    339static int deferred_probe_initcall(void)
    340{
    341	debugfs_create_file("devices_deferred", 0444, NULL, NULL,
    342			    &deferred_devs_fops);
    343
    344	driver_deferred_probe_enable = true;
    345	driver_deferred_probe_trigger();
    346	/* Sort as many dependencies as possible before exiting initcalls */
    347	flush_work(&deferred_probe_work);
    348	initcalls_done = true;
    349
    350	if (!IS_ENABLED(CONFIG_MODULES))
    351		fw_devlink_drivers_done();
    352
    353	/*
     354	 * Trigger deferred probe again; this time we won't defer anything
     355	 * that is optional.
    356	 */
    357	driver_deferred_probe_trigger();
    358	flush_work(&deferred_probe_work);
    359
    360	if (driver_deferred_probe_timeout > 0) {
    361		schedule_delayed_work(&deferred_probe_timeout_work,
    362			driver_deferred_probe_timeout * HZ);
    363	}
    364	return 0;
    365}
    366late_initcall(deferred_probe_initcall);
    367
    368static void __exit deferred_probe_exit(void)
    369{
    370	debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
    371}
    372__exitcall(deferred_probe_exit);
    373
    374/**
    375 * device_is_bound() - Check if device is bound to a driver
    376 * @dev: device to check
    377 *
    378 * Returns true if passed device has already finished probing successfully
    379 * against a driver.
    380 *
    381 * This function must be called with the device lock held.
    382 */
    383bool device_is_bound(struct device *dev)
    384{
    385	return dev->p && klist_node_attached(&dev->p->knode_driver);
    386}
    387
    388static void driver_bound(struct device *dev)
    389{
    390	if (device_is_bound(dev)) {
    391		pr_warn("%s: device %s already bound\n",
    392			__func__, kobject_name(&dev->kobj));
    393		return;
    394	}
    395
    396	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
    397		 __func__, dev_name(dev));
    398
    399	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
    400	device_links_driver_bound(dev);
    401
    402	device_pm_check_callbacks(dev);
    403
    404	/*
    405	 * Make sure the device is no longer in one of the deferred lists and
    406	 * kick off retrying all pending devices
    407	 */
    408	driver_deferred_probe_del(dev);
    409	driver_deferred_probe_trigger();
    410
    411	if (dev->bus)
    412		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
    413					     BUS_NOTIFY_BOUND_DRIVER, dev);
    414
    415	kobject_uevent(&dev->kobj, KOBJ_BIND);
    416}
    417
    418static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
    419			    const char *buf, size_t count)
    420{
    421	device_lock(dev);
    422	dev->driver->coredump(dev);
    423	device_unlock(dev);
    424
    425	return count;
    426}
    427static DEVICE_ATTR_WO(coredump);
    428
    429static int driver_sysfs_add(struct device *dev)
    430{
    431	int ret;
    432
    433	if (dev->bus)
    434		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
    435					     BUS_NOTIFY_BIND_DRIVER, dev);
    436
    437	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
    438				kobject_name(&dev->kobj));
    439	if (ret)
    440		goto fail;
    441
    442	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
    443				"driver");
    444	if (ret)
    445		goto rm_dev;
    446
    447	if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump)
    448		return 0;
    449
    450	ret = device_create_file(dev, &dev_attr_coredump);
    451	if (!ret)
    452		return 0;
    453
    454	sysfs_remove_link(&dev->kobj, "driver");
    455
    456rm_dev:
    457	sysfs_remove_link(&dev->driver->p->kobj,
    458			  kobject_name(&dev->kobj));
    459
    460fail:
    461	return ret;
    462}
    463
    464static void driver_sysfs_remove(struct device *dev)
    465{
    466	struct device_driver *drv = dev->driver;
    467
    468	if (drv) {
    469		if (drv->coredump)
    470			device_remove_file(dev, &dev_attr_coredump);
    471		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
    472		sysfs_remove_link(&dev->kobj, "driver");
    473	}
    474}
    475
    476/**
    477 * device_bind_driver - bind a driver to one device.
    478 * @dev: device.
    479 *
    480 * Allow manual attachment of a driver to a device.
    481 * Caller must have already set @dev->driver.
    482 *
    483 * Note that this does not modify the bus reference count.
    484 * Please verify that is accounted for before calling this.
    485 * (It is ok to call with no other effort from a driver's probe() method.)
    486 *
    487 * This function must be called with the device lock held.
    488 *
    489 * Callers should prefer to use device_driver_attach() instead.
    490 */
    491int device_bind_driver(struct device *dev)
    492{
    493	int ret;
    494
    495	ret = driver_sysfs_add(dev);
    496	if (!ret) {
    497		device_links_force_bind(dev);
    498		driver_bound(dev);
    499	}
    500	else if (dev->bus)
    501		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
    502					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
    503	return ret;
    504}
    505EXPORT_SYMBOL_GPL(device_bind_driver);
    506
    507static atomic_t probe_count = ATOMIC_INIT(0);
    508static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
    509
    510static ssize_t state_synced_show(struct device *dev,
    511				 struct device_attribute *attr, char *buf)
    512{
    513	bool val;
    514
    515	device_lock(dev);
    516	val = dev->state_synced;
    517	device_unlock(dev);
    518
    519	return sysfs_emit(buf, "%u\n", val);
    520}
    521static DEVICE_ATTR_RO(state_synced);
    522
    523static void device_unbind_cleanup(struct device *dev)
    524{
    525	devres_release_all(dev);
    526	arch_teardown_dma_ops(dev);
    527	kfree(dev->dma_range_map);
    528	dev->dma_range_map = NULL;
    529	dev->driver = NULL;
    530	dev_set_drvdata(dev, NULL);
    531	if (dev->pm_domain && dev->pm_domain->dismiss)
    532		dev->pm_domain->dismiss(dev);
    533	pm_runtime_reinit(dev);
    534	dev_pm_set_driver_flags(dev, 0);
    535}
    536
    537static void device_remove(struct device *dev)
    538{
    539	device_remove_file(dev, &dev_attr_state_synced);
    540	device_remove_groups(dev, dev->driver->dev_groups);
    541
    542	if (dev->bus && dev->bus->remove)
    543		dev->bus->remove(dev);
    544	else if (dev->driver->remove)
    545		dev->driver->remove(dev);
    546}
    547
    548static int call_driver_probe(struct device *dev, struct device_driver *drv)
    549{
    550	int ret = 0;
    551
    552	if (dev->bus->probe)
    553		ret = dev->bus->probe(dev);
    554	else if (drv->probe)
    555		ret = drv->probe(dev);
    556
    557	switch (ret) {
    558	case 0:
    559		break;
    560	case -EPROBE_DEFER:
    561		/* Driver requested deferred probing */
    562		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
    563		break;
    564	case -ENODEV:
    565	case -ENXIO:
    566		pr_debug("%s: probe of %s rejects match %d\n",
    567			 drv->name, dev_name(dev), ret);
    568		break;
    569	default:
    570		/* driver matched but the probe failed */
    571		pr_warn("%s: probe of %s failed with error %d\n",
    572			drv->name, dev_name(dev), ret);
    573		break;
    574	}
    575
    576	return ret;
    577}
    578
    579static int really_probe(struct device *dev, struct device_driver *drv)
    580{
    581	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
    582			   !drv->suppress_bind_attrs;
    583	int ret;
    584
    585	if (defer_all_probes) {
    586		/*
    587		 * Value of defer_all_probes can be set only by
    588		 * device_block_probing() which, in turn, will call
    589		 * wait_for_device_probe() right after that to avoid any races.
    590		 */
    591		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
    592		return -EPROBE_DEFER;
    593	}
    594
    595	ret = device_links_check_suppliers(dev);
    596	if (ret)
    597		return ret;
    598
    599	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
    600		 drv->bus->name, __func__, drv->name, dev_name(dev));
    601	if (!list_empty(&dev->devres_head)) {
    602		dev_crit(dev, "Resources present before probing\n");
    603		ret = -EBUSY;
    604		goto done;
    605	}
    606
    607re_probe:
    608	dev->driver = drv;
    609
    610	/* If using pinctrl, bind pins now before probing */
    611	ret = pinctrl_bind_pins(dev);
    612	if (ret)
    613		goto pinctrl_bind_failed;
    614
    615	if (dev->bus->dma_configure) {
    616		ret = dev->bus->dma_configure(dev);
    617		if (ret)
    618			goto pinctrl_bind_failed;
    619	}
    620
    621	ret = driver_sysfs_add(dev);
    622	if (ret) {
    623		pr_err("%s: driver_sysfs_add(%s) failed\n",
    624		       __func__, dev_name(dev));
    625		goto sysfs_failed;
    626	}
    627
    628	if (dev->pm_domain && dev->pm_domain->activate) {
    629		ret = dev->pm_domain->activate(dev);
    630		if (ret)
    631			goto probe_failed;
    632	}
    633
    634	ret = call_driver_probe(dev, drv);
    635	if (ret) {
    636		/*
    637		 * Return probe errors as positive values so that the callers
    638		 * can distinguish them from other errors.
    639		 */
    640		ret = -ret;
    641		goto probe_failed;
    642	}
    643
    644	ret = device_add_groups(dev, drv->dev_groups);
    645	if (ret) {
    646		dev_err(dev, "device_add_groups() failed\n");
    647		goto dev_groups_failed;
    648	}
    649
    650	if (dev_has_sync_state(dev)) {
    651		ret = device_create_file(dev, &dev_attr_state_synced);
    652		if (ret) {
    653			dev_err(dev, "state_synced sysfs add failed\n");
    654			goto dev_sysfs_state_synced_failed;
    655		}
    656	}
    657
    658	if (test_remove) {
    659		test_remove = false;
    660
    661		device_remove(dev);
    662		driver_sysfs_remove(dev);
    663		device_unbind_cleanup(dev);
    664
    665		goto re_probe;
    666	}
    667
    668	pinctrl_init_done(dev);
    669
    670	if (dev->pm_domain && dev->pm_domain->sync)
    671		dev->pm_domain->sync(dev);
    672
    673	driver_bound(dev);
    674	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
    675		 drv->bus->name, __func__, dev_name(dev), drv->name);
    676	goto done;
    677
    678dev_sysfs_state_synced_failed:
    679dev_groups_failed:
    680	device_remove(dev);
    681probe_failed:
    682	driver_sysfs_remove(dev);
    683sysfs_failed:
    684	if (dev->bus)
    685		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
    686					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
    687	if (dev->bus && dev->bus->dma_cleanup)
    688		dev->bus->dma_cleanup(dev);
    689pinctrl_bind_failed:
    690	device_links_no_driver(dev);
    691	device_unbind_cleanup(dev);
    692done:
    693	return ret;
    694}
    695
    696/*
    697 * For initcall_debug, show the driver probe time.
    698 */
    699static int really_probe_debug(struct device *dev, struct device_driver *drv)
    700{
    701	ktime_t calltime, rettime;
    702	int ret;
    703
    704	calltime = ktime_get();
    705	ret = really_probe(dev, drv);
    706	rettime = ktime_get();
    707	pr_debug("probe of %s returned %d after %lld usecs\n",
    708		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
    709	return ret;
    710}
    711
    712/**
    713 * driver_probe_done
    714 * Determine if the probe sequence is finished or not.
    715 *
    716 * Should somehow figure out how to use a semaphore, not an atomic variable...
    717 */
    718int driver_probe_done(void)
    719{
    720	int local_probe_count = atomic_read(&probe_count);
    721
    722	pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
    723	if (local_probe_count)
    724		return -EBUSY;
    725	return 0;
    726}
    727
    728/**
    729 * wait_for_device_probe
    730 * Wait for device probing to be completed.
    731 */
    732void wait_for_device_probe(void)
    733{
    734	/* wait for the deferred probe workqueue to finish */
    735	flush_work(&deferred_probe_work);
    736
    737	/* wait for the known devices to complete their probing */
    738	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
    739	async_synchronize_full();
    740}
    741EXPORT_SYMBOL_GPL(wait_for_device_probe);
    742
    743static int __driver_probe_device(struct device_driver *drv, struct device *dev)
    744{
    745	int ret = 0;
    746
    747	if (dev->p->dead || !device_is_registered(dev))
    748		return -ENODEV;
    749	if (dev->driver)
    750		return -EBUSY;
    751
    752	dev->can_match = true;
    753	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
    754		 drv->bus->name, __func__, dev_name(dev), drv->name);
    755
    756	pm_runtime_get_suppliers(dev);
    757	if (dev->parent)
    758		pm_runtime_get_sync(dev->parent);
    759
    760	pm_runtime_barrier(dev);
    761	if (initcall_debug)
    762		ret = really_probe_debug(dev, drv);
    763	else
    764		ret = really_probe(dev, drv);
    765	pm_request_idle(dev);
    766
    767	if (dev->parent)
    768		pm_runtime_put(dev->parent);
    769
    770	pm_runtime_put_suppliers(dev);
    771	return ret;
    772}
    773
    774/**
    775 * driver_probe_device - attempt to bind device & driver together
    776 * @drv: driver to bind a device to
    777 * @dev: device to try to bind to the driver
    778 *
    779 * This function returns -ENODEV if the device is not registered, -EBUSY if it
    780 * already has a driver, 0 if the device is bound successfully and a positive
    781 * (inverted) error code for failures from the ->probe method.
    782 *
    783 * This function must be called with @dev lock held.  When called for a
    784 * USB interface, @dev->parent lock must be held as well.
    785 *
    786 * If the device has a parent, runtime-resume the parent before driver probing.
    787 */
    788static int driver_probe_device(struct device_driver *drv, struct device *dev)
    789{
    790	int trigger_count = atomic_read(&deferred_trigger_count);
    791	int ret;
    792
    793	atomic_inc(&probe_count);
    794	ret = __driver_probe_device(drv, dev);
    795	if (ret == -EPROBE_DEFER || ret == EPROBE_DEFER) {
    796		driver_deferred_probe_add(dev);
    797
    798		/*
    799		 * Did a trigger occur while probing? Need to re-trigger if yes
    800		 */
    801		if (trigger_count != atomic_read(&deferred_trigger_count) &&
    802		    !defer_all_probes)
    803			driver_deferred_probe_trigger();
    804	}
    805	atomic_dec(&probe_count);
    806	wake_up_all(&probe_waitqueue);
    807	return ret;
    808}
    809
    810static inline bool cmdline_requested_async_probing(const char *drv_name)
    811{
    812	bool async_drv;
    813
    814	async_drv = parse_option_str(async_probe_drv_names, drv_name);
    815
    816	return (async_probe_default != async_drv);
    817}
    818
    819/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
    820static int __init save_async_options(char *buf)
    821{
    822	if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
    823		pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
    824
    825	strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
    826	async_probe_default = parse_option_str(async_probe_drv_names, "*");
    827
    828	return 1;
    829}
    830__setup("driver_async_probe=", save_async_options);
    831
    832bool driver_allows_async_probing(struct device_driver *drv)
    833{
    834	switch (drv->probe_type) {
    835	case PROBE_PREFER_ASYNCHRONOUS:
    836		return true;
    837
    838	case PROBE_FORCE_SYNCHRONOUS:
    839		return false;
    840
    841	default:
    842		if (cmdline_requested_async_probing(drv->name))
    843			return true;
    844
    845		if (module_requested_async_probing(drv->owner))
    846			return true;
    847
    848		return false;
    849	}
    850}
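
Editor's illustration (not part of dd.c): the usual way a driver reaches the first case above is by setting probe_type in its struct device_driver; the default case falls back to the driver_async_probe= command line (and, for modules, the module's async_probe request). A minimal sketch with a made-up driver name:

/* Illustrative sketch only -- not part of dd.c. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int baz_probe(struct platform_device *pdev)
{
	return 0;	/* nothing to do in this sketch */
}

static struct platform_driver baz_driver = {
	.probe	= baz_probe,
	.driver	= {
		.name		= "baz",	/* made-up name */
		/* Matches the PROBE_PREFER_ASYNCHRONOUS case above. */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(baz_driver);

Booting with driver_async_probe=baz (or driver_async_probe=* to flip the default for unlisted drivers) has a similar effect through cmdline_requested_async_probing() above.
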
    851
    852struct device_attach_data {
    853	struct device *dev;
    854
    855	/*
    856	 * Indicates whether we are considering asynchronous probing or
    857	 * not. Only initial binding after device or driver registration
     858	 * (including deferral processing) may be done asynchronously; the
    859	 * rest is always synchronous, as we expect it is being done by
    860	 * request from userspace.
    861	 */
    862	bool check_async;
    863
    864	/*
    865	 * Indicates if we are binding synchronous or asynchronous drivers.
    866	 * When asynchronous probing is enabled we'll execute 2 passes
     867	 * over drivers: the first pass doing synchronous probing and the second
     868	 * doing asynchronous probing (if synchronous probing did not succeed -
     869	 * most likely because there was no driver requiring synchronous
     870	 * probing - and we found an asynchronous driver during the first pass).
     871	 * The 2 passes are done because we can't start an asynchronous
     872	 * probe for a given device and driver from bus_for_each_drv() since
     873	 * the driver pointer is not guaranteed to stay valid once
    874	 * bus_for_each_drv() iterates to the next driver on the bus.
    875	 */
    876	bool want_async;
    877
    878	/*
     879	 * We'll set have_async to 'true' if, while scanning for a matching
     880	 * driver, we encounter one that requests asynchronous probing.
    881	 */
    882	bool have_async;
    883};
    884
    885static int __device_attach_driver(struct device_driver *drv, void *_data)
    886{
    887	struct device_attach_data *data = _data;
    888	struct device *dev = data->dev;
    889	bool async_allowed;
    890	int ret;
    891
    892	ret = driver_match_device(drv, dev);
    893	if (ret == 0) {
    894		/* no match */
    895		return 0;
    896	} else if (ret == -EPROBE_DEFER) {
    897		dev_dbg(dev, "Device match requests probe deferral\n");
    898		dev->can_match = true;
    899		driver_deferred_probe_add(dev);
    900	} else if (ret < 0) {
    901		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
    902		return ret;
    903	} /* ret > 0 means positive match */
    904
    905	async_allowed = driver_allows_async_probing(drv);
    906
    907	if (async_allowed)
    908		data->have_async = true;
    909
    910	if (data->check_async && async_allowed != data->want_async)
    911		return 0;
    912
    913	/*
    914	 * Ignore errors returned by ->probe so that the next driver can try
    915	 * its luck.
    916	 */
    917	ret = driver_probe_device(drv, dev);
    918	if (ret < 0)
    919		return ret;
    920	return ret == 0;
    921}
    922
    923static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
    924{
    925	struct device *dev = _dev;
    926	struct device_attach_data data = {
    927		.dev		= dev,
    928		.check_async	= true,
    929		.want_async	= true,
    930	};
    931
    932	device_lock(dev);
    933
    934	/*
     935	 * Check if the device has already been removed or claimed. This may
     936	 * happen when driver loading, device discovery/registration,
     937	 * and deferred probe processing all happen at once on
     938	 * multiple threads.
    939	 */
    940	if (dev->p->dead || dev->driver)
    941		goto out_unlock;
    942
    943	if (dev->parent)
    944		pm_runtime_get_sync(dev->parent);
    945
    946	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
    947	dev_dbg(dev, "async probe completed\n");
    948
    949	pm_request_idle(dev);
    950
    951	if (dev->parent)
    952		pm_runtime_put(dev->parent);
    953out_unlock:
    954	device_unlock(dev);
    955
    956	put_device(dev);
    957}
    958
    959static int __device_attach(struct device *dev, bool allow_async)
    960{
    961	int ret = 0;
    962	bool async = false;
    963
    964	device_lock(dev);
    965	if (dev->p->dead) {
    966		goto out_unlock;
    967	} else if (dev->driver) {
    968		if (device_is_bound(dev)) {
    969			ret = 1;
    970			goto out_unlock;
    971		}
    972		ret = device_bind_driver(dev);
    973		if (ret == 0)
    974			ret = 1;
    975		else {
    976			dev->driver = NULL;
    977			ret = 0;
    978		}
    979	} else {
    980		struct device_attach_data data = {
    981			.dev = dev,
    982			.check_async = allow_async,
    983			.want_async = false,
    984		};
    985
    986		if (dev->parent)
    987			pm_runtime_get_sync(dev->parent);
    988
    989		ret = bus_for_each_drv(dev->bus, NULL, &data,
    990					__device_attach_driver);
    991		if (!ret && allow_async && data.have_async) {
    992			/*
     993			 * If we could not find an appropriate driver
    994			 * synchronously and we are allowed to do
    995			 * async probes and there are drivers that
    996			 * want to probe asynchronously, we'll
    997			 * try them.
    998			 */
    999			dev_dbg(dev, "scheduling asynchronous probe\n");
   1000			get_device(dev);
   1001			async = true;
   1002		} else {
   1003			pm_request_idle(dev);
   1004		}
   1005
   1006		if (dev->parent)
   1007			pm_runtime_put(dev->parent);
   1008	}
   1009out_unlock:
   1010	device_unlock(dev);
   1011	if (async)
   1012		async_schedule_dev(__device_attach_async_helper, dev);
   1013	return ret;
   1014}
   1015
   1016/**
   1017 * device_attach - try to attach device to a driver.
   1018 * @dev: device.
   1019 *
   1020 * Walk the list of drivers that the bus has and call
   1021 * driver_probe_device() for each pair. If a compatible
   1022 * pair is found, break out and return.
   1023 *
   1024 * Returns 1 if the device was bound to a driver;
   1025 * 0 if no matching driver was found;
   1026 * -ENODEV if the device is not registered.
   1027 *
   1028 * When called for a USB interface, @dev->parent lock must be held.
   1029 */
   1030int device_attach(struct device *dev)
   1031{
   1032	return __device_attach(dev, false);
   1033}
   1034EXPORT_SYMBOL_GPL(device_attach);
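
Editor's illustration (not part of dd.c): interpreting device_attach()'s return convention from a hypothetical caller that re-runs driver matching for an already-registered device.

/* Illustrative sketch only -- not part of dd.c. */
#include <linux/device.h>

static int qux_try_bind(struct device *dev)	/* hypothetical helper */
{
	int ret = device_attach(dev);

	if (ret < 0)			/* -ENODEV: device not registered */
		return ret;
	if (ret == 1)			/* a driver is now bound */
		return 0;

	dev_dbg(dev, "no matching driver yet\n");	/* ret == 0 */
	return 0;
}
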
   1035
   1036void device_initial_probe(struct device *dev)
   1037{
   1038	__device_attach(dev, true);
   1039}
   1040
   1041/*
   1042 * __device_driver_lock - acquire locks needed to manipulate dev->drv
   1043 * @dev: Device we will update driver info for
   1044 * @parent: Parent device. Needed if the bus requires parent lock
   1045 *
   1046 * This function will take the required locks for manipulating dev->drv.
   1047 * Normally this will just be the @dev lock, but when called for a USB
   1048 * interface, @parent lock will be held as well.
   1049 */
   1050static void __device_driver_lock(struct device *dev, struct device *parent)
   1051{
   1052	if (parent && dev->bus->need_parent_lock)
   1053		device_lock(parent);
   1054	device_lock(dev);
   1055}
   1056
   1057/*
   1058 * __device_driver_unlock - release locks needed to manipulate dev->drv
   1059 * @dev: Device we will update driver info for
   1060 * @parent: Parent device. Needed if the bus requires parent lock
   1061 *
   1062 * This function will release the required locks for manipulating dev->drv.
   1063 * Normally this will just be the @dev lock, but when called for a
   1064 * USB interface, @parent lock will be released as well.
   1065 */
   1066static void __device_driver_unlock(struct device *dev, struct device *parent)
   1067{
   1068	device_unlock(dev);
   1069	if (parent && dev->bus->need_parent_lock)
   1070		device_unlock(parent);
   1071}
   1072
   1073/**
   1074 * device_driver_attach - attach a specific driver to a specific device
   1075 * @drv: Driver to attach
   1076 * @dev: Device to attach it to
   1077 *
   1078 * Manually attach driver to a device. Will acquire both @dev lock and
   1079 * @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
   1080 */
   1081int device_driver_attach(struct device_driver *drv, struct device *dev)
   1082{
   1083	int ret;
   1084
   1085	__device_driver_lock(dev, dev->parent);
   1086	ret = __driver_probe_device(drv, dev);
   1087	__device_driver_unlock(dev, dev->parent);
   1088
   1089	/* also return probe errors as normal negative errnos */
   1090	if (ret > 0)
   1091		ret = -ret;
   1092	if (ret == -EPROBE_DEFER)
   1093		return -EAGAIN;
   1094	return ret;
   1095}
   1096EXPORT_SYMBOL_GPL(device_driver_attach);
   1097
   1098static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
   1099{
   1100	struct device *dev = _dev;
   1101	struct device_driver *drv;
   1102	int ret;
   1103
   1104	__device_driver_lock(dev, dev->parent);
   1105	drv = dev->p->async_driver;
   1106	dev->p->async_driver = NULL;
   1107	ret = driver_probe_device(drv, dev);
   1108	__device_driver_unlock(dev, dev->parent);
   1109
   1110	dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
   1111
   1112	put_device(dev);
   1113}
   1114
   1115static int __driver_attach(struct device *dev, void *data)
   1116{
   1117	struct device_driver *drv = data;
   1118	int ret;
   1119
   1120	/*
   1121	 * Lock device and try to bind to it. We drop the error
   1122	 * here and always return 0, because we need to keep trying
    1123	 * to bind to devices, and some drivers will return an error
    1124	 * simply because they do not support the device.
   1125	 *
   1126	 * driver_probe_device() will spit a warning if there
   1127	 * is an error.
   1128	 */
   1129
   1130	ret = driver_match_device(drv, dev);
   1131	if (ret == 0) {
   1132		/* no match */
   1133		return 0;
   1134	} else if (ret == -EPROBE_DEFER) {
   1135		dev_dbg(dev, "Device match requests probe deferral\n");
   1136		dev->can_match = true;
   1137		driver_deferred_probe_add(dev);
   1138	} else if (ret < 0) {
   1139		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
   1140		return ret;
   1141	} /* ret > 0 means positive match */
   1142
   1143	if (driver_allows_async_probing(drv)) {
   1144		/*
   1145		 * Instead of probing the device synchronously we will
   1146		 * probe it asynchronously to allow for more parallelism.
   1147		 *
   1148		 * We only take the device lock here in order to guarantee
   1149		 * that the dev->driver and async_driver fields are protected
   1150		 */
   1151		dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
   1152		device_lock(dev);
   1153		if (!dev->driver && !dev->p->async_driver) {
   1154			get_device(dev);
   1155			dev->p->async_driver = drv;
   1156			async_schedule_dev(__driver_attach_async_helper, dev);
   1157		}
   1158		device_unlock(dev);
   1159		return 0;
   1160	}
   1161
   1162	__device_driver_lock(dev, dev->parent);
   1163	driver_probe_device(drv, dev);
   1164	__device_driver_unlock(dev, dev->parent);
   1165
   1166	return 0;
   1167}
   1168
   1169/**
   1170 * driver_attach - try to bind driver to devices.
   1171 * @drv: driver.
   1172 *
   1173 * Walk the list of devices that the bus has on it and try to
   1174 * match the driver with each one.  If driver_probe_device()
   1175 * returns 0 and the @dev->driver is set, we've found a
   1176 * compatible pair.
   1177 */
   1178int driver_attach(struct device_driver *drv)
   1179{
   1180	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
   1181}
   1182EXPORT_SYMBOL_GPL(driver_attach);
   1183
   1184/*
   1185 * __device_release_driver() must be called with @dev lock held.
   1186 * When called for a USB interface, @dev->parent lock must be held as well.
   1187 */
   1188static void __device_release_driver(struct device *dev, struct device *parent)
   1189{
   1190	struct device_driver *drv;
   1191
   1192	drv = dev->driver;
   1193	if (drv) {
   1194		pm_runtime_get_sync(dev);
   1195
   1196		while (device_links_busy(dev)) {
   1197			__device_driver_unlock(dev, parent);
   1198
   1199			device_links_unbind_consumers(dev);
   1200
   1201			__device_driver_lock(dev, parent);
   1202			/*
   1203			 * A concurrent invocation of the same function might
   1204			 * have released the driver successfully while this one
   1205			 * was waiting, so check for that.
   1206			 */
   1207			if (dev->driver != drv) {
   1208				pm_runtime_put(dev);
   1209				return;
   1210			}
   1211		}
   1212
   1213		driver_sysfs_remove(dev);
   1214
   1215		if (dev->bus)
   1216			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
   1217						     BUS_NOTIFY_UNBIND_DRIVER,
   1218						     dev);
   1219
   1220		pm_runtime_put_sync(dev);
   1221
   1222		device_remove(dev);
   1223
   1224		if (dev->bus && dev->bus->dma_cleanup)
   1225			dev->bus->dma_cleanup(dev);
   1226
   1227		device_links_driver_cleanup(dev);
   1228		device_unbind_cleanup(dev);
   1229
   1230		klist_remove(&dev->p->knode_driver);
   1231		device_pm_check_callbacks(dev);
   1232		if (dev->bus)
   1233			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
   1234						     BUS_NOTIFY_UNBOUND_DRIVER,
   1235						     dev);
   1236
   1237		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
   1238	}
   1239}
   1240
   1241void device_release_driver_internal(struct device *dev,
   1242				    struct device_driver *drv,
   1243				    struct device *parent)
   1244{
   1245	__device_driver_lock(dev, parent);
   1246
   1247	if (!drv || drv == dev->driver)
   1248		__device_release_driver(dev, parent);
   1249
   1250	__device_driver_unlock(dev, parent);
   1251}
   1252
   1253/**
   1254 * device_release_driver - manually detach device from driver.
   1255 * @dev: device.
   1256 *
   1257 * Manually detach device from driver.
   1258 * When called for a USB interface, @dev->parent lock must be held.
   1259 *
   1260 * If this function is to be called with @dev->parent lock held, ensure that
   1261 * the device's consumers are unbound in advance or that their locks can be
   1262 * acquired under the @dev->parent lock.
   1263 */
   1264void device_release_driver(struct device *dev)
   1265{
   1266	/*
   1267	 * If anyone calls device_release_driver() recursively from
   1268	 * within their ->remove callback for the same device, they
   1269	 * will deadlock right here.
   1270	 */
   1271	device_release_driver_internal(dev, NULL, NULL);
   1272}
   1273EXPORT_SYMBOL_GPL(device_release_driver);
   1274
   1275/**
   1276 * device_driver_detach - detach driver from a specific device
   1277 * @dev: device to detach driver from
   1278 *
   1279 * Detach driver from device. Will acquire both @dev lock and @dev->parent
   1280 * lock if needed.
   1281 */
   1282void device_driver_detach(struct device *dev)
   1283{
   1284	device_release_driver_internal(dev, NULL, dev->parent);
   1285}
   1286
   1287/**
   1288 * driver_detach - detach driver from all devices it controls.
   1289 * @drv: driver.
   1290 */
   1291void driver_detach(struct device_driver *drv)
   1292{
   1293	struct device_private *dev_prv;
   1294	struct device *dev;
   1295
   1296	if (driver_allows_async_probing(drv))
   1297		async_synchronize_full();
   1298
   1299	for (;;) {
   1300		spin_lock(&drv->p->klist_devices.k_lock);
   1301		if (list_empty(&drv->p->klist_devices.k_list)) {
   1302			spin_unlock(&drv->p->klist_devices.k_lock);
   1303			break;
   1304		}
   1305		dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
   1306				     struct device_private,
   1307				     knode_driver.n_node);
   1308		dev = dev_prv->device;
   1309		get_device(dev);
   1310		spin_unlock(&drv->p->klist_devices.k_lock);
   1311		device_release_driver_internal(dev, drv, dev->parent);
   1312		put_device(dev);
   1313	}
   1314}