cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

scan.c (69116B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * scan.c - support for transforming the ACPI namespace into individual objects
      4 */
      5
      6#define pr_fmt(fmt) "ACPI: " fmt
      7
      8#include <linux/module.h>
      9#include <linux/init.h>
     10#include <linux/slab.h>
     11#include <linux/kernel.h>
     12#include <linux/acpi.h>
     13#include <linux/acpi_iort.h>
     14#include <linux/acpi_viot.h>
     15#include <linux/iommu.h>
     16#include <linux/signal.h>
     17#include <linux/kthread.h>
     18#include <linux/dmi.h>
     19#include <linux/dma-map-ops.h>
     20#include <linux/platform_data/x86/apple.h>
     21#include <linux/pgtable.h>
     22#include <linux/crc32.h>
     23
     24#include "internal.h"
     25
     26extern struct acpi_device *acpi_root;
     27
     28#define ACPI_BUS_CLASS			"system_bus"
     29#define ACPI_BUS_HID			"LNXSYBUS"
     30#define ACPI_BUS_DEVICE_NAME		"System Bus"
     31
     32#define ACPI_IS_ROOT_DEVICE(device)    (!(device)->parent)
     33
     34#define INVALID_ACPI_HANDLE	((acpi_handle)empty_zero_page)
     35
     36static const char *dummy_hid = "device";
     37
     38static LIST_HEAD(acpi_dep_list);
     39static DEFINE_MUTEX(acpi_dep_list_lock);
     40LIST_HEAD(acpi_bus_id_list);
     41static DEFINE_MUTEX(acpi_scan_lock);
     42static LIST_HEAD(acpi_scan_handlers_list);
     43DEFINE_MUTEX(acpi_device_lock);
     44LIST_HEAD(acpi_wakeup_device_list);
     45static DEFINE_MUTEX(acpi_hp_context_lock);
     46
     47/*
     48 * The UART device described by the SPCR table is the only object which needs
     49 * special-casing. Everything else is covered by ACPI namespace paths in
     50 * the STAO table.
     51 */
     52static u64 spcr_uart_addr;
     53
     54void acpi_scan_lock_acquire(void)
     55{
     56	mutex_lock(&acpi_scan_lock);
     57}
     58EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
     59
     60void acpi_scan_lock_release(void)
     61{
     62	mutex_unlock(&acpi_scan_lock);
     63}
     64EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
     65
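/*
 * Illustrative sketch: callers that rescan or trim a namespace branch are
 * expected to bracket the operation with the scan lock above.  The function
 * name below is a placeholder, and acpi_bus_scan() is assumed to be the
 * operation being serialized.
 */
static int __maybe_unused example_rescan_branch(acpi_handle handle)
{
	int error;

	acpi_scan_lock_acquire();
	error = acpi_bus_scan(handle);
	acpi_scan_lock_release();

	return error;
}
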
     66void acpi_lock_hp_context(void)
     67{
     68	mutex_lock(&acpi_hp_context_lock);
     69}
     70
     71void acpi_unlock_hp_context(void)
     72{
     73	mutex_unlock(&acpi_hp_context_lock);
     74}
     75
     76void acpi_initialize_hp_context(struct acpi_device *adev,
     77				struct acpi_hotplug_context *hp,
     78				int (*notify)(struct acpi_device *, u32),
     79				void (*uevent)(struct acpi_device *, u32))
     80{
     81	acpi_lock_hp_context();
     82	hp->notify = notify;
     83	hp->uevent = uevent;
     84	acpi_set_hp_context(adev, hp);
     85	acpi_unlock_hp_context();
     86}
     87EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
     88
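/*
 * Illustrative sketch of installing hotplug callbacks with
 * acpi_initialize_hp_context().  The callback names and the statically
 * allocated context below are placeholders; real users embed the context in
 * a per-device structure.
 */
static int example_hp_notify(struct acpi_device *adev, u32 event)
{
	/* Handle the hotplug event; 0 on success, negative errno on error. */
	return 0;
}

static void example_hp_uevent(struct acpi_device *adev, u32 event)
{
	/* Forward the event to user space in a driver-specific way. */
}

static void __maybe_unused example_install_hp_context(struct acpi_device *adev)
{
	static struct acpi_hotplug_context example_hp;

	acpi_initialize_hp_context(adev, &example_hp, example_hp_notify,
				   example_hp_uevent);
}
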
     89int acpi_scan_add_handler(struct acpi_scan_handler *handler)
     90{
     91	if (!handler)
     92		return -EINVAL;
     93
     94	list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
     95	return 0;
     96}
     97
     98int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
     99				       const char *hotplug_profile_name)
    100{
    101	int error;
    102
    103	error = acpi_scan_add_handler(handler);
    104	if (error)
    105		return error;
    106
    107	acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
    108	return 0;
    109}
    110
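/*
 * Illustrative sketch of registering a scan handler together with a hotplug
 * profile, in the style of the container and memory hotplug handlers.  The
 * "XXXX0000" HID, the function names and the "example" profile name are
 * placeholders.
 */
static const struct acpi_device_id example_handler_ids[] = {
	{"XXXX0000", 0},
	{}
};

static int example_handler_attach(struct acpi_device *adev,
				  const struct acpi_device_id *id)
{
	/* A positive return value tells the core the handler claimed adev. */
	return 1;
}

static struct acpi_scan_handler example_scan_handler = {
	.ids = example_handler_ids,
	.attach = example_handler_attach,
};

static void __init __maybe_unused example_handler_init(void)
{
	acpi_scan_add_handler_with_hotplug(&example_scan_handler, "example");
}
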
    111bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
    112{
    113	struct acpi_device_physical_node *pn;
    114	bool offline = true;
    115	char *envp[] = { "EVENT=offline", NULL };
    116
    117	/*
    118	 * acpi_container_offline() calls this for all of the container's
    119	 * children under the container's physical_node_lock lock.
    120	 */
    121	mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
    122
    123	list_for_each_entry(pn, &adev->physical_node_list, node)
    124		if (device_supports_offline(pn->dev) && !pn->dev->offline) {
    125			if (uevent)
    126				kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp);
    127
    128			offline = false;
    129			break;
    130		}
    131
    132	mutex_unlock(&adev->physical_node_lock);
    133	return offline;
    134}
    135
    136static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
    137				    void **ret_p)
    138{
    139	struct acpi_device *device = acpi_fetch_acpi_dev(handle);
    140	struct acpi_device_physical_node *pn;
    141	bool second_pass = (bool)data;
    142	acpi_status status = AE_OK;
    143
    144	if (!device)
    145		return AE_OK;
    146
    147	if (device->handler && !device->handler->hotplug.enabled) {
    148		*ret_p = &device->dev;
    149		return AE_SUPPORT;
    150	}
    151
    152	mutex_lock(&device->physical_node_lock);
    153
    154	list_for_each_entry(pn, &device->physical_node_list, node) {
    155		int ret;
    156
    157		if (second_pass) {
    158			/* Skip devices offlined by the first pass. */
    159			if (pn->put_online)
    160				continue;
    161		} else {
    162			pn->put_online = false;
    163		}
    164		ret = device_offline(pn->dev);
    165		if (ret >= 0) {
    166			pn->put_online = !ret;
    167		} else {
    168			*ret_p = pn->dev;
    169			if (second_pass) {
    170				status = AE_ERROR;
    171				break;
    172			}
    173		}
    174	}
    175
    176	mutex_unlock(&device->physical_node_lock);
    177
    178	return status;
    179}
    180
    181static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
    182				   void **ret_p)
    183{
    184	struct acpi_device *device = acpi_fetch_acpi_dev(handle);
    185	struct acpi_device_physical_node *pn;
    186
    187	if (!device)
    188		return AE_OK;
    189
    190	mutex_lock(&device->physical_node_lock);
    191
    192	list_for_each_entry(pn, &device->physical_node_list, node)
    193		if (pn->put_online) {
    194			device_online(pn->dev);
    195			pn->put_online = false;
    196		}
    197
    198	mutex_unlock(&device->physical_node_lock);
    199
    200	return AE_OK;
    201}
    202
    203static int acpi_scan_try_to_offline(struct acpi_device *device)
    204{
    205	acpi_handle handle = device->handle;
    206	struct device *errdev = NULL;
    207	acpi_status status;
    208
    209	/*
    210	 * Carry out two passes here and ignore errors in the first pass,
    211	 * because if the devices in question are memory blocks and
    212	 * CONFIG_MEMCG is set, one of the blocks may hold data structures
    213	 * that the other blocks depend on, but it is not known in advance which
    214	 * block holds them.
    215	 *
    216	 * If the first pass is successful, the second one isn't needed, though.
    217	 */
    218	status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
    219				     NULL, acpi_bus_offline, (void *)false,
    220				     (void **)&errdev);
    221	if (status == AE_SUPPORT) {
    222		dev_warn(errdev, "Offline disabled.\n");
    223		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
    224				    acpi_bus_online, NULL, NULL, NULL);
    225		return -EPERM;
    226	}
    227	acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
    228	if (errdev) {
    229		errdev = NULL;
    230		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
    231				    NULL, acpi_bus_offline, (void *)true,
    232				    (void **)&errdev);
    233		if (!errdev)
    234			acpi_bus_offline(handle, 0, (void *)true,
    235					 (void **)&errdev);
    236
    237		if (errdev) {
    238			dev_warn(errdev, "Offline failed.\n");
    239			acpi_bus_online(handle, 0, NULL, NULL);
    240			acpi_walk_namespace(ACPI_TYPE_ANY, handle,
    241					    ACPI_UINT32_MAX, acpi_bus_online,
    242					    NULL, NULL, NULL);
    243			return -EBUSY;
    244		}
    245	}
    246	return 0;
    247}
    248
    249static int acpi_scan_hot_remove(struct acpi_device *device)
    250{
    251	acpi_handle handle = device->handle;
    252	unsigned long long sta;
    253	acpi_status status;
    254
    255	if (device->handler && device->handler->hotplug.demand_offline) {
    256		if (!acpi_scan_is_offline(device, true))
    257			return -EBUSY;
    258	} else {
    259		int error = acpi_scan_try_to_offline(device);
    260		if (error)
    261			return error;
    262	}
    263
    264	acpi_handle_debug(handle, "Ejecting\n");
    265
    266	acpi_bus_trim(device);
    267
    268	acpi_evaluate_lck(handle, 0);
    269	/*
    270	 * TBD: _EJD support.
    271	 */
    272	status = acpi_evaluate_ej0(handle);
    273	if (status == AE_NOT_FOUND)
    274		return -ENODEV;
    275	else if (ACPI_FAILURE(status))
    276		return -EIO;
    277
    278	/*
    279	 * Verify if eject was indeed successful.  If not, log an error
    280	 * message.  No need to call _OST since _EJ0 call was made OK.
    281	 */
    282	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
    283	if (ACPI_FAILURE(status)) {
    284		acpi_handle_warn(handle,
    285			"Status check after eject failed (0x%x)\n", status);
    286	} else if (sta & ACPI_STA_DEVICE_ENABLED) {
    287		acpi_handle_warn(handle,
    288			"Eject incomplete - status 0x%llx\n", sta);
    289	}
    290
    291	return 0;
    292}
    293
    294static int acpi_scan_device_not_present(struct acpi_device *adev)
    295{
    296	if (!acpi_device_enumerated(adev)) {
    297		dev_warn(&adev->dev, "Still not present\n");
    298		return -EALREADY;
    299	}
    300	acpi_bus_trim(adev);
    301	return 0;
    302}
    303
    304static int acpi_scan_device_check(struct acpi_device *adev)
    305{
    306	int error;
    307
    308	acpi_bus_get_status(adev);
    309	if (adev->status.present || adev->status.functional) {
    310		/*
    311		 * This function is only called for device objects for which
    312		 * matching scan handlers exist.  The only situation in which
    313		 * the scan handler is not attached to this device object yet
    314		 * is when the device has just appeared (either it wasn't
    315		 * present at all before or it was removed and then added
    316		 * again).
    317		 */
    318		if (adev->handler) {
    319			dev_warn(&adev->dev, "Already enumerated\n");
    320			return -EALREADY;
    321		}
    322		error = acpi_bus_scan(adev->handle);
    323		if (error) {
    324			dev_warn(&adev->dev, "Namespace scan failure\n");
    325			return error;
    326		}
    327		if (!adev->handler) {
    328			dev_warn(&adev->dev, "Enumeration failure\n");
    329			error = -ENODEV;
    330		}
    331	} else {
    332		error = acpi_scan_device_not_present(adev);
    333	}
    334	return error;
    335}
    336
    337static int acpi_scan_bus_check(struct acpi_device *adev)
    338{
    339	struct acpi_scan_handler *handler = adev->handler;
    340	struct acpi_device *child;
    341	int error;
    342
    343	acpi_bus_get_status(adev);
    344	if (!(adev->status.present || adev->status.functional)) {
    345		acpi_scan_device_not_present(adev);
    346		return 0;
    347	}
    348	if (handler && handler->hotplug.scan_dependent)
    349		return handler->hotplug.scan_dependent(adev);
    350
    351	error = acpi_bus_scan(adev->handle);
    352	if (error) {
    353		dev_warn(&adev->dev, "Namespace scan failure\n");
    354		return error;
    355	}
    356	list_for_each_entry(child, &adev->children, node) {
    357		error = acpi_scan_bus_check(child);
    358		if (error)
    359			return error;
    360	}
    361	return 0;
    362}
    363
    364static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
    365{
    366	switch (type) {
    367	case ACPI_NOTIFY_BUS_CHECK:
    368		return acpi_scan_bus_check(adev);
    369	case ACPI_NOTIFY_DEVICE_CHECK:
    370		return acpi_scan_device_check(adev);
    371	case ACPI_NOTIFY_EJECT_REQUEST:
    372	case ACPI_OST_EC_OSPM_EJECT:
    373		if (adev->handler && !adev->handler->hotplug.enabled) {
    374			dev_info(&adev->dev, "Eject disabled\n");
    375			return -EPERM;
    376		}
    377		acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
    378				  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
    379		return acpi_scan_hot_remove(adev);
    380	}
    381	return -EINVAL;
    382}
    383
    384void acpi_device_hotplug(struct acpi_device *adev, u32 src)
    385{
    386	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
    387	int error = -ENODEV;
    388
    389	lock_device_hotplug();
    390	mutex_lock(&acpi_scan_lock);
    391
    392	/*
    393	 * The device object's ACPI handle cannot become invalid as long as we
    394	 * are holding acpi_scan_lock, but it might have become invalid before
    395	 * that lock was acquired.
    396	 */
    397	if (adev->handle == INVALID_ACPI_HANDLE)
    398		goto err_out;
    399
    400	if (adev->flags.is_dock_station) {
    401		error = dock_notify(adev, src);
    402	} else if (adev->flags.hotplug_notify) {
    403		error = acpi_generic_hotplug_event(adev, src);
    404	} else {
    405		int (*notify)(struct acpi_device *, u32);
    406
    407		acpi_lock_hp_context();
    408		notify = adev->hp ? adev->hp->notify : NULL;
    409		acpi_unlock_hp_context();
    410		/*
    411		 * There may be additional notify handlers for device objects
    412		 * without the .event() callback, so ignore them here.
    413		 */
    414		if (notify)
    415			error = notify(adev, src);
    416		else
    417			goto out;
    418	}
    419	switch (error) {
    420	case 0:
    421		ost_code = ACPI_OST_SC_SUCCESS;
    422		break;
    423	case -EPERM:
    424		ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
    425		break;
    426	case -EBUSY:
    427		ost_code = ACPI_OST_SC_DEVICE_BUSY;
    428		break;
    429	default:
    430		ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
    431		break;
    432	}
    433
    434 err_out:
    435	acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
    436
    437 out:
    438	acpi_bus_put_acpi_device(adev);
    439	mutex_unlock(&acpi_scan_lock);
    440	unlock_device_hotplug();
    441}
    442
    443static void acpi_free_power_resources_lists(struct acpi_device *device)
    444{
    445	int i;
    446
    447	if (device->wakeup.flags.valid)
    448		acpi_power_resources_list_free(&device->wakeup.resources);
    449
    450	if (!device->power.flags.power_resources)
    451		return;
    452
    453	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
    454		struct acpi_device_power_state *ps = &device->power.states[i];
    455		acpi_power_resources_list_free(&ps->resources);
    456	}
    457}
    458
    459static void acpi_device_release(struct device *dev)
    460{
    461	struct acpi_device *acpi_dev = to_acpi_device(dev);
    462
    463	acpi_free_properties(acpi_dev);
    464	acpi_free_pnp_ids(&acpi_dev->pnp);
    465	acpi_free_power_resources_lists(acpi_dev);
    466	kfree(acpi_dev);
    467}
    468
    469static void acpi_device_del(struct acpi_device *device)
    470{
    471	struct acpi_device_bus_id *acpi_device_bus_id;
    472
    473	mutex_lock(&acpi_device_lock);
    474	if (device->parent)
    475		list_del(&device->node);
    476
    477	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
    478		if (!strcmp(acpi_device_bus_id->bus_id,
    479			    acpi_device_hid(device))) {
    480			ida_free(&acpi_device_bus_id->instance_ida,
    481				 device->pnp.instance_no);
    482			if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
    483				list_del(&acpi_device_bus_id->node);
    484				kfree_const(acpi_device_bus_id->bus_id);
    485				kfree(acpi_device_bus_id);
    486			}
    487			break;
    488		}
    489
    490	list_del(&device->wakeup_list);
    491	mutex_unlock(&acpi_device_lock);
    492
    493	acpi_power_add_remove_device(device, false);
    494	acpi_device_remove_files(device);
    495	if (device->remove)
    496		device->remove(device);
    497
    498	device_del(&device->dev);
    499}
    500
    501static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
    502
    503static LIST_HEAD(acpi_device_del_list);
    504static DEFINE_MUTEX(acpi_device_del_lock);
    505
    506static void acpi_device_del_work_fn(struct work_struct *work_not_used)
    507{
    508	for (;;) {
    509		struct acpi_device *adev;
    510
    511		mutex_lock(&acpi_device_del_lock);
    512
    513		if (list_empty(&acpi_device_del_list)) {
    514			mutex_unlock(&acpi_device_del_lock);
    515			break;
    516		}
    517		adev = list_first_entry(&acpi_device_del_list,
    518					struct acpi_device, del_list);
    519		list_del(&adev->del_list);
    520
    521		mutex_unlock(&acpi_device_del_lock);
    522
    523		blocking_notifier_call_chain(&acpi_reconfig_chain,
    524					     ACPI_RECONFIG_DEVICE_REMOVE, adev);
    525
    526		acpi_device_del(adev);
    527		/*
    528		 * Drop references to all power resources that might have been
    529		 * used by the device.
    530		 */
    531		acpi_power_transition(adev, ACPI_STATE_D3_COLD);
    532		acpi_dev_put(adev);
    533	}
    534}
    535
    536/**
    537 * acpi_scan_drop_device - Drop an ACPI device object.
    538 * @handle: Handle of an ACPI namespace node, not used.
    539 * @context: Address of the ACPI device object to drop.
    540 *
    541 * This is invoked by acpi_ns_delete_node() during the removal of the ACPI
    542 * namespace node the device object pointed to by @context is attached to.
    543 *
    544 * The unregistration is carried out asynchronously to avoid running
    545 * acpi_device_del() under the ACPICA namespace mutex, and the list is used to
    546 * ensure the correct ordering (the device objects must be unregistered in the
    547 * same order in which the corresponding namespace nodes are deleted).
    548 */
    549static void acpi_scan_drop_device(acpi_handle handle, void *context)
    550{
    551	static DECLARE_WORK(work, acpi_device_del_work_fn);
    552	struct acpi_device *adev = context;
    553
    554	mutex_lock(&acpi_device_del_lock);
    555
    556	/*
    557	 * Use the ACPI hotplug workqueue which is ordered, so this work item
    558	 * won't run after any hotplug work items submitted subsequently.  That
    559	 * prevents attempts to register device objects identical to those being
    560	 * deleted from happening concurrently (such attempts result from
    561	 * hotplug events handled via the ACPI hotplug workqueue).  It also will
    562	 * run after all of the work items submitted previously, which helps
    563	 * those work items to ensure that they are not accessing stale device
    564	 * objects.
    565	 */
    566	if (list_empty(&acpi_device_del_list))
    567		acpi_queue_hotplug_work(&work);
    568
    569	list_add_tail(&adev->del_list, &acpi_device_del_list);
    570	/* Make acpi_ns_validate_handle() return NULL for this handle. */
    571	adev->handle = INVALID_ACPI_HANDLE;
    572
    573	mutex_unlock(&acpi_device_del_lock);
    574}
    575
    576static struct acpi_device *handle_to_device(acpi_handle handle,
    577					    void (*callback)(void *))
    578{
    579	struct acpi_device *adev = NULL;
    580	acpi_status status;
    581
    582	status = acpi_get_data_full(handle, acpi_scan_drop_device,
    583				    (void **)&adev, callback);
    584	if (ACPI_FAILURE(status) || !adev) {
    585		acpi_handle_debug(handle, "No context!\n");
    586		return NULL;
    587	}
    588	return adev;
    589}
    590
    591/**
    592 * acpi_fetch_acpi_dev - Retrieve ACPI device object.
    593 * @handle: ACPI handle associated with the requested ACPI device object.
    594 *
    595 * Return a pointer to the ACPI device object associated with @handle, if
    596 * present, or NULL otherwise.
    597 */
    598struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle)
    599{
    600	return handle_to_device(handle, NULL);
    601}
    602EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev);
    603
    604static void get_acpi_device(void *dev)
    605{
    606	acpi_dev_get(dev);
    607}
    608
    609struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
    610{
    611	return handle_to_device(handle, get_acpi_device);
    612}
    613EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
    614
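/*
 * Illustrative sketch contrasting the two lookup helpers above; the function
 * name is a placeholder.  acpi_fetch_acpi_dev() returns a borrowed pointer,
 * while acpi_bus_get_acpi_device() takes a reference that must be dropped.
 */
static void __maybe_unused example_lookup(acpi_handle handle)
{
	struct acpi_device *adev;

	/* Borrowed pointer, no reference taken. */
	adev = acpi_fetch_acpi_dev(handle);
	if (adev)
		dev_dbg(&adev->dev, "device object present\n");

	/* Reference-counted lookup, must be balanced with a put. */
	adev = acpi_bus_get_acpi_device(handle);
	if (adev) {
		dev_dbg(&adev->dev, "device object present, reference held\n");
		acpi_bus_put_acpi_device(adev);
	}
}
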
    615static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
    616{
    617	struct acpi_device_bus_id *acpi_device_bus_id;
    618
    619	/* Find suitable bus_id and instance number in acpi_bus_id_list. */
    620	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
    621		if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
    622			return acpi_device_bus_id;
    623	}
    624	return NULL;
    625}
    626
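/*
 * The resulting device name has the form "<bus_id>:<instance>", with the
 * instance number in hex, e.g. "LNXSYBUS:00" for the first object using the
 * LNXSYBUS bus id.
 */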
    627static int acpi_device_set_name(struct acpi_device *device,
    628				struct acpi_device_bus_id *acpi_device_bus_id)
    629{
    630	struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
    631	int result;
    632
    633	result = ida_alloc(instance_ida, GFP_KERNEL);
    634	if (result < 0)
    635		return result;
    636
    637	device->pnp.instance_no = result;
    638	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
    639	return 0;
    640}
    641
    642static int acpi_tie_acpi_dev(struct acpi_device *adev)
    643{
    644	acpi_handle handle = adev->handle;
    645	acpi_status status;
    646
    647	if (!handle)
    648		return 0;
    649
    650	status = acpi_attach_data(handle, acpi_scan_drop_device, adev);
    651	if (ACPI_FAILURE(status)) {
    652		acpi_handle_err(handle, "Unable to attach device data\n");
    653		return -ENODEV;
    654	}
    655
    656	return 0;
    657}
    658
    659static void acpi_store_pld_crc(struct acpi_device *adev)
    660{
    661	struct acpi_pld_info *pld;
    662	acpi_status status;
    663
    664	status = acpi_get_physical_device_location(adev->handle, &pld);
    665	if (ACPI_FAILURE(status))
    666		return;
    667
    668	adev->pld_crc = crc32(~0, pld, sizeof(*pld));
    669	ACPI_FREE(pld);
    670}
    671
    672static int __acpi_device_add(struct acpi_device *device,
    673			     void (*release)(struct device *))
    674{
    675	struct acpi_device_bus_id *acpi_device_bus_id;
    676	int result;
    677
    678	/*
    679	 * Linkage
    680	 * -------
    681	 * Link this device to its parent and siblings.
    682	 */
    683	INIT_LIST_HEAD(&device->children);
    684	INIT_LIST_HEAD(&device->node);
    685	INIT_LIST_HEAD(&device->wakeup_list);
    686	INIT_LIST_HEAD(&device->physical_node_list);
    687	INIT_LIST_HEAD(&device->del_list);
    688	mutex_init(&device->physical_node_lock);
    689
    690	mutex_lock(&acpi_device_lock);
    691
    692	acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
    693	if (acpi_device_bus_id) {
    694		result = acpi_device_set_name(device, acpi_device_bus_id);
    695		if (result)
    696			goto err_unlock;
    697	} else {
    698		acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
    699					     GFP_KERNEL);
    700		if (!acpi_device_bus_id) {
    701			result = -ENOMEM;
    702			goto err_unlock;
    703		}
    704		acpi_device_bus_id->bus_id =
    705			kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
    706		if (!acpi_device_bus_id->bus_id) {
    707			kfree(acpi_device_bus_id);
    708			result = -ENOMEM;
    709			goto err_unlock;
    710		}
    711
    712		ida_init(&acpi_device_bus_id->instance_ida);
    713
    714		result = acpi_device_set_name(device, acpi_device_bus_id);
    715		if (result) {
    716			kfree_const(acpi_device_bus_id->bus_id);
    717			kfree(acpi_device_bus_id);
    718			goto err_unlock;
    719		}
    720
    721		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
    722	}
    723
    724	if (device->parent)
    725		list_add_tail(&device->node, &device->parent->children);
    726
    727	if (device->wakeup.flags.valid)
    728		list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
    729
    730	acpi_store_pld_crc(device);
    731
    732	mutex_unlock(&acpi_device_lock);
    733
    734	if (device->parent)
    735		device->dev.parent = &device->parent->dev;
    736
    737	device->dev.bus = &acpi_bus_type;
    738	device->dev.release = release;
    739	result = device_add(&device->dev);
    740	if (result) {
    741		dev_err(&device->dev, "Error registering device\n");
    742		goto err;
    743	}
    744
    745	result = acpi_device_setup_files(device);
    746	if (result)
    747		pr_err("Error creating sysfs interface for device %s\n",
    748		       dev_name(&device->dev));
    749
    750	return 0;
    751
    752err:
    753	mutex_lock(&acpi_device_lock);
    754
    755	if (device->parent)
    756		list_del(&device->node);
    757
    758	list_del(&device->wakeup_list);
    759
    760err_unlock:
    761	mutex_unlock(&acpi_device_lock);
    762
    763	acpi_detach_data(device->handle, acpi_scan_drop_device);
    764
    765	return result;
    766}
    767
    768int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
    769{
    770	int ret;
    771
    772	ret = acpi_tie_acpi_dev(adev);
    773	if (ret)
    774		return ret;
    775
    776	return __acpi_device_add(adev, release);
    777}
    778
    779/* --------------------------------------------------------------------------
    780                                 Device Enumeration
    781   -------------------------------------------------------------------------- */
    782static bool acpi_info_matches_ids(struct acpi_device_info *info,
    783				  const char * const ids[])
    784{
    785	struct acpi_pnp_device_id_list *cid_list = NULL;
    786	int i, index;
    787
    788	if (!(info->valid & ACPI_VALID_HID))
    789		return false;
    790
    791	index = match_string(ids, -1, info->hardware_id.string);
    792	if (index >= 0)
    793		return true;
    794
    795	if (info->valid & ACPI_VALID_CID)
    796		cid_list = &info->compatible_id_list;
    797
    798	if (!cid_list)
    799		return false;
    800
    801	for (i = 0; i < cid_list->count; i++) {
    802		index = match_string(ids, -1, cid_list->ids[i].string);
    803		if (index >= 0)
    804			return true;
    805	}
    806
    807	return false;
    808}
    809
    810/* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */
    811static const char * const acpi_ignore_dep_ids[] = {
    812	"PNP0D80", /* Windows-compatible System Power Management Controller */
    813	"INT33BD", /* Intel Baytrail Mailbox Device */
    814	NULL
    815};
    816
    817/* List of HIDs for which we honor deps of matching ACPI devs, when checking _DEP lists. */
    818static const char * const acpi_honor_dep_ids[] = {
    819	"INT3472", /* Camera sensor PMIC / clk and regulator info */
    820	NULL
    821};
    822
    823static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
    824{
    825	struct acpi_device *device;
    826	acpi_status status;
    827
    828	/*
    829	 * Fixed hardware devices do not appear in the namespace and do not
    830	 * have handles, but we fabricate acpi_devices for them, so we have
    831	 * to deal with them specially.
    832	 */
    833	if (!handle)
    834		return acpi_root;
    835
    836	do {
    837		status = acpi_get_parent(handle, &handle);
    838		if (ACPI_FAILURE(status))
    839			return status == AE_NULL_ENTRY ? NULL : acpi_root;
    840
    841		device = acpi_fetch_acpi_dev(handle);
    842	} while (!device);
    843	return device;
    844}
    845
    846acpi_status
    847acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
    848{
    849	acpi_status status;
    850	acpi_handle tmp;
    851	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
    852	union acpi_object *obj;
    853
    854	status = acpi_get_handle(handle, "_EJD", &tmp);
    855	if (ACPI_FAILURE(status))
    856		return status;
    857
    858	status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
    859	if (ACPI_SUCCESS(status)) {
    860		obj = buffer.pointer;
    861		status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
    862					 ejd);
    863		kfree(buffer.pointer);
    864	}
    865	return status;
    866}
    867EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
    868
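/*
 * _PRW returns a package of at least two elements: element 0 is either an
 * integer GPE index on the FADT GPE block, or a two-element package holding
 * a reference to a GPE block device and the GPE index within that block;
 * element 1 is the deepest sleep state the device can wake the system from;
 * any further elements reference wakeup power resources.  The helper below
 * extracts that information into struct acpi_device_wakeup.
 */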
    869static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
    870{
    871	acpi_handle handle = dev->handle;
    872	struct acpi_device_wakeup *wakeup = &dev->wakeup;
    873	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
    874	union acpi_object *package = NULL;
    875	union acpi_object *element = NULL;
    876	acpi_status status;
    877	int err = -ENODATA;
    878
    879	INIT_LIST_HEAD(&wakeup->resources);
    880
    881	/* _PRW */
    882	status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
    883	if (ACPI_FAILURE(status)) {
    884		acpi_handle_info(handle, "_PRW evaluation failed: %s\n",
    885				 acpi_format_exception(status));
    886		return err;
    887	}
    888
    889	package = (union acpi_object *)buffer.pointer;
    890
    891	if (!package || package->package.count < 2)
    892		goto out;
    893
    894	element = &(package->package.elements[0]);
    895	if (!element)
    896		goto out;
    897
    898	if (element->type == ACPI_TYPE_PACKAGE) {
    899		if ((element->package.count < 2) ||
    900		    (element->package.elements[0].type !=
    901		     ACPI_TYPE_LOCAL_REFERENCE)
    902		    || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
    903			goto out;
    904
    905		wakeup->gpe_device =
    906		    element->package.elements[0].reference.handle;
    907		wakeup->gpe_number =
    908		    (u32) element->package.elements[1].integer.value;
    909	} else if (element->type == ACPI_TYPE_INTEGER) {
    910		wakeup->gpe_device = NULL;
    911		wakeup->gpe_number = element->integer.value;
    912	} else {
    913		goto out;
    914	}
    915
    916	element = &(package->package.elements[1]);
    917	if (element->type != ACPI_TYPE_INTEGER)
    918		goto out;
    919
    920	wakeup->sleep_state = element->integer.value;
    921
    922	err = acpi_extract_power_resources(package, 2, &wakeup->resources);
    923	if (err)
    924		goto out;
    925
    926	if (!list_empty(&wakeup->resources)) {
    927		int sleep_state;
    928
    929		err = acpi_power_wakeup_list_init(&wakeup->resources,
    930						  &sleep_state);
    931		if (err) {
    932			acpi_handle_warn(handle, "Retrieving current states "
    933					 "of wakeup power resources failed\n");
    934			acpi_power_resources_list_free(&wakeup->resources);
    935			goto out;
    936		}
    937		if (sleep_state < wakeup->sleep_state) {
    938			acpi_handle_warn(handle, "Overriding _PRW sleep state "
    939					 "(S%d) by S%d from power resources\n",
    940					 (int)wakeup->sleep_state, sleep_state);
    941			wakeup->sleep_state = sleep_state;
    942		}
    943	}
    944
    945 out:
    946	kfree(buffer.pointer);
    947	return err;
    948}
    949
    950static bool acpi_wakeup_gpe_init(struct acpi_device *device)
    951{
    952	static const struct acpi_device_id button_device_ids[] = {
    953		{"PNP0C0C", 0},		/* Power button */
    954		{"PNP0C0D", 0},		/* Lid */
    955		{"PNP0C0E", 0},		/* Sleep button */
    956		{"", 0},
    957	};
    958	struct acpi_device_wakeup *wakeup = &device->wakeup;
    959	acpi_status status;
    960
    961	wakeup->flags.notifier_present = 0;
    962
    963	/* Power button, Lid switch always enable wakeup */
    964	if (!acpi_match_device_ids(device, button_device_ids)) {
    965		if (!acpi_match_device_ids(device, &button_device_ids[1])) {
    966			/* Do not use Lid/sleep button for S5 wakeup */
    967			if (wakeup->sleep_state == ACPI_STATE_S5)
    968				wakeup->sleep_state = ACPI_STATE_S4;
    969		}
    970		acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
    971		device_set_wakeup_capable(&device->dev, true);
    972		return true;
    973	}
    974
    975	status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
    976					 wakeup->gpe_number);
    977	return ACPI_SUCCESS(status);
    978}
    979
    980static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
    981{
    982	int err;
    983
    984	/* Presence of _PRW indicates wake capable */
    985	if (!acpi_has_method(device->handle, "_PRW"))
    986		return;
    987
    988	err = acpi_bus_extract_wakeup_device_power_package(device);
    989	if (err) {
    990		dev_err(&device->dev, "Unable to extract wakeup power resources");
    991		return;
    992	}
    993
    994	device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
    995	device->wakeup.prepare_count = 0;
    996	/*
    997	 * Call the _PSW/_DSW object to disable the ability of this ACPI
    998	 * device (which has a _PRW object) to wake the sleeping system.
    999	 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW,
   1000	 * so _DSW is evaluated first and _PSW is only used when _DSW is not
   1001	 * present.
   1002	 */
   1003	err = acpi_device_sleep_wake(device, 0, 0, 0);
   1004	if (err)
   1005		pr_debug("error in _DSW or _PSW evaluation\n");
   1006}
   1007
   1008static void acpi_bus_init_power_state(struct acpi_device *device, int state)
   1009{
   1010	struct acpi_device_power_state *ps = &device->power.states[state];
   1011	char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
   1012	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
   1013	acpi_status status;
   1014
   1015	INIT_LIST_HEAD(&ps->resources);
   1016
   1017	/* Evaluate "_PRx" to get referenced power resources */
   1018	status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
   1019	if (ACPI_SUCCESS(status)) {
   1020		union acpi_object *package = buffer.pointer;
   1021
   1022		if (buffer.length && package
   1023		    && package->type == ACPI_TYPE_PACKAGE
   1024		    && package->package.count)
   1025			acpi_extract_power_resources(package, 0, &ps->resources);
   1026
   1027		ACPI_FREE(buffer.pointer);
   1028	}
   1029
   1030	/* Evaluate "_PSx" to see if we can do explicit sets */
   1031	pathname[2] = 'S';
   1032	if (acpi_has_method(device->handle, pathname))
   1033		ps->flags.explicit_set = 1;
   1034
   1035	/* State is valid if there are means to put the device into it. */
   1036	if (!list_empty(&ps->resources) || ps->flags.explicit_set)
   1037		ps->flags.valid = 1;
   1038
   1039	ps->power = -1;		/* Unknown - driver assigned */
   1040	ps->latency = -1;	/* Unknown - driver assigned */
   1041}
   1042
   1043static void acpi_bus_get_power_flags(struct acpi_device *device)
   1044{
   1045	unsigned long long dsc = ACPI_STATE_D0;
   1046	u32 i;
   1047
   1048	/* Presence of _PS0|_PR0 indicates 'power manageable' */
   1049	if (!acpi_has_method(device->handle, "_PS0") &&
   1050	    !acpi_has_method(device->handle, "_PR0"))
   1051		return;
   1052
   1053	device->flags.power_manageable = 1;
   1054
   1055	/*
   1056	 * Power Management Flags
   1057	 */
   1058	if (acpi_has_method(device->handle, "_PSC"))
   1059		device->power.flags.explicit_get = 1;
   1060
   1061	if (acpi_has_method(device->handle, "_IRC"))
   1062		device->power.flags.inrush_current = 1;
   1063
   1064	if (acpi_has_method(device->handle, "_DSW"))
   1065		device->power.flags.dsw_present = 1;
   1066
   1067	acpi_evaluate_integer(device->handle, "_DSC", NULL, &dsc);
   1068	device->power.state_for_enumeration = dsc;
   1069
   1070	/*
   1071	 * Enumerate supported power management states
   1072	 */
   1073	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
   1074		acpi_bus_init_power_state(device, i);
   1075
   1076	INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
   1077
   1078	/* Set the defaults for D0 and D3hot (always supported). */
   1079	device->power.states[ACPI_STATE_D0].flags.valid = 1;
   1080	device->power.states[ACPI_STATE_D0].power = 100;
   1081	device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
   1082
   1083	/*
   1084	 * Use power resources only if the D0 list of them is populated, because
   1085	 * some platforms may provide _PR3 only to indicate D3cold support and
   1086	 * in those cases the power resources list returned by it may be bogus.
   1087	 */
   1088	if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
   1089		device->power.flags.power_resources = 1;
   1090		/*
   1091		 * D3cold is supported if the D3hot list of power resources is
   1092		 * not empty.
   1093		 */
   1094		if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
   1095			device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
   1096	}
   1097
   1098	if (acpi_bus_init_power(device))
   1099		device->flags.power_manageable = 0;
   1100}
   1101
   1102static void acpi_bus_get_flags(struct acpi_device *device)
   1103{
   1104	/* Presence of _STA indicates 'dynamic_status' */
   1105	if (acpi_has_method(device->handle, "_STA"))
   1106		device->flags.dynamic_status = 1;
   1107
   1108	/* Presence of _RMV indicates 'removable' */
   1109	if (acpi_has_method(device->handle, "_RMV"))
   1110		device->flags.removable = 1;
   1111
   1112	/* Presence of _EJD|_EJ0 indicates 'ejectable' */
   1113	if (acpi_has_method(device->handle, "_EJD") ||
   1114	    acpi_has_method(device->handle, "_EJ0"))
   1115		device->flags.ejectable = 1;
   1116}
   1117
   1118static void acpi_device_get_busid(struct acpi_device *device)
   1119{
   1120	char bus_id[5] = { '?', 0 };
   1121	struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
   1122	int i = 0;
   1123
   1124	/*
   1125	 * Bus ID
   1126	 * ------
   1127	 * The device's Bus ID is simply the object name.
   1128	 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
   1129	 */
   1130	if (ACPI_IS_ROOT_DEVICE(device)) {
   1131		strcpy(device->pnp.bus_id, "ACPI");
   1132		return;
   1133	}
   1134
   1135	switch (device->device_type) {
   1136	case ACPI_BUS_TYPE_POWER_BUTTON:
   1137		strcpy(device->pnp.bus_id, "PWRF");
   1138		break;
   1139	case ACPI_BUS_TYPE_SLEEP_BUTTON:
   1140		strcpy(device->pnp.bus_id, "SLPF");
   1141		break;
   1142	case ACPI_BUS_TYPE_ECDT_EC:
   1143		strcpy(device->pnp.bus_id, "ECDT");
   1144		break;
   1145	default:
   1146		acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
   1147		/* Clean up trailing underscores (if any) */
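		/* e.g. the 4-character name "EC__" is reported as "EC" */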
   1148		for (i = 3; i > 1; i--) {
   1149			if (bus_id[i] == '_')
   1150				bus_id[i] = '\0';
   1151			else
   1152				break;
   1153		}
   1154		strcpy(device->pnp.bus_id, bus_id);
   1155		break;
   1156	}
   1157}
   1158
   1159/*
   1160 * acpi_ata_match - see if an acpi object is an ATA device
   1161 *
   1162 * If an acpi object has one of the ACPI ATA methods defined,
   1163 * then we can safely call it an ATA device.
   1164 */
   1165bool acpi_ata_match(acpi_handle handle)
   1166{
   1167	return acpi_has_method(handle, "_GTF") ||
   1168	       acpi_has_method(handle, "_GTM") ||
   1169	       acpi_has_method(handle, "_STM") ||
   1170	       acpi_has_method(handle, "_SDD");
   1171}
   1172
   1173/*
   1174 * acpi_bay_match - see if an acpi object is an ejectable drive bay
   1175 *
   1176 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
   1177 * then we can safely call it an ejectable drive bay.
   1178 */
   1179bool acpi_bay_match(acpi_handle handle)
   1180{
   1181	acpi_handle phandle;
   1182
   1183	if (!acpi_has_method(handle, "_EJ0"))
   1184		return false;
   1185	if (acpi_ata_match(handle))
   1186		return true;
   1187	if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
   1188		return false;
   1189
   1190	return acpi_ata_match(phandle);
   1191}
   1192
   1193bool acpi_device_is_battery(struct acpi_device *adev)
   1194{
   1195	struct acpi_hardware_id *hwid;
   1196
   1197	list_for_each_entry(hwid, &adev->pnp.ids, list)
   1198		if (!strcmp("PNP0C0A", hwid->id))
   1199			return true;
   1200
   1201	return false;
   1202}
   1203
   1204static bool is_ejectable_bay(struct acpi_device *adev)
   1205{
   1206	acpi_handle handle = adev->handle;
   1207
   1208	if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
   1209		return true;
   1210
   1211	return acpi_bay_match(handle);
   1212}
   1213
   1214/*
   1215 * acpi_dock_match - see if an acpi object has a _DCK method
   1216 */
   1217bool acpi_dock_match(acpi_handle handle)
   1218{
   1219	return acpi_has_method(handle, "_DCK");
   1220}
   1221
   1222static acpi_status
   1223acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
   1224			  void **return_value)
   1225{
   1226	long *cap = context;
   1227
   1228	if (acpi_has_method(handle, "_BCM") &&
   1229	    acpi_has_method(handle, "_BCL")) {
   1230		acpi_handle_debug(handle, "Found generic backlight support\n");
   1231		*cap |= ACPI_VIDEO_BACKLIGHT;
   1232		/* We have backlight support, no need to scan further */
   1233		return AE_CTRL_TERMINATE;
   1234	}
   1235	return 0;
   1236}
   1237
   1238/* Returns true if the ACPI object is a video device which can be
   1239 * handled by video.ko.
   1240 * The device will get a Linux-specific CID added in scan.c to
   1241 * identify the device as an ACPI graphics device.
   1242 * Be aware that the graphics device may not be physically present.
   1243 * Use acpi_video_get_capabilities() to detect general ACPI video
   1244 * capabilities of present cards.
   1245 */
   1246long acpi_is_video_device(acpi_handle handle)
   1247{
   1248	long video_caps = 0;
   1249
   1250	/* Is this device able to support video switching ? */
   1251	if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
   1252		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
   1253
   1254	/* Is this device able to retrieve a video ROM ? */
   1255	if (acpi_has_method(handle, "_ROM"))
   1256		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
   1257
   1258	/* Is this device able to configure which video head to be POSTed ? */
   1259	if (acpi_has_method(handle, "_VPO") &&
   1260	    acpi_has_method(handle, "_GPD") &&
   1261	    acpi_has_method(handle, "_SPD"))
   1262		video_caps |= ACPI_VIDEO_DEVICE_POSTING;
   1263
   1264	/* Only check for backlight functionality if one of the above hit. */
   1265	if (video_caps)
   1266		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
   1267				    ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
   1268				    &video_caps, NULL);
   1269
   1270	return video_caps;
   1271}
   1272EXPORT_SYMBOL(acpi_is_video_device);
   1273
   1274const char *acpi_device_hid(struct acpi_device *device)
   1275{
   1276	struct acpi_hardware_id *hid;
   1277
   1278	if (list_empty(&device->pnp.ids))
   1279		return dummy_hid;
   1280
   1281	hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
   1282	return hid->id;
   1283}
   1284EXPORT_SYMBOL(acpi_device_hid);
   1285
   1286static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
   1287{
   1288	struct acpi_hardware_id *id;
   1289
   1290	id = kmalloc(sizeof(*id), GFP_KERNEL);
   1291	if (!id)
   1292		return;
   1293
   1294	id->id = kstrdup_const(dev_id, GFP_KERNEL);
   1295	if (!id->id) {
   1296		kfree(id);
   1297		return;
   1298	}
   1299
   1300	list_add_tail(&id->list, &pnp->ids);
   1301	pnp->type.hardware_id = 1;
   1302}
   1303
   1304/*
   1305 * Old IBM workstations have a DSDT bug wherein the SMBus object
   1306 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
   1307 * prefix.  Work around this.
   1308 */
   1309static bool acpi_ibm_smbus_match(acpi_handle handle)
   1310{
   1311	char node_name[ACPI_PATH_SEGMENT_LENGTH];
   1312	struct acpi_buffer path = { sizeof(node_name), node_name };
   1313
   1314	if (!dmi_name_in_vendors("IBM"))
   1315		return false;
   1316
   1317	/* Look for SMBS object */
   1318	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
   1319	    strcmp("SMBS", path.pointer))
   1320		return false;
   1321
   1322	/* Does it have the necessary (but misnamed) methods? */
   1323	if (acpi_has_method(handle, "SBI") &&
   1324	    acpi_has_method(handle, "SBR") &&
   1325	    acpi_has_method(handle, "SBW"))
   1326		return true;
   1327
   1328	return false;
   1329}
   1330
   1331static bool acpi_object_is_system_bus(acpi_handle handle)
   1332{
   1333	acpi_handle tmp;
   1334
   1335	if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
   1336	    tmp == handle)
   1337		return true;
   1338	if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
   1339	    tmp == handle)
   1340		return true;
   1341
   1342	return false;
   1343}
   1344
   1345static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
   1346			     int device_type)
   1347{
   1348	struct acpi_device_info *info = NULL;
   1349	struct acpi_pnp_device_id_list *cid_list;
   1350	int i;
   1351
   1352	switch (device_type) {
   1353	case ACPI_BUS_TYPE_DEVICE:
   1354		if (handle == ACPI_ROOT_OBJECT) {
   1355			acpi_add_id(pnp, ACPI_SYSTEM_HID);
   1356			break;
   1357		}
   1358
   1359		acpi_get_object_info(handle, &info);
   1360		if (!info) {
   1361			pr_err("%s: Error reading device info\n", __func__);
   1362			return;
   1363		}
   1364
   1365		if (info->valid & ACPI_VALID_HID) {
   1366			acpi_add_id(pnp, info->hardware_id.string);
   1367			pnp->type.platform_id = 1;
   1368		}
   1369		if (info->valid & ACPI_VALID_CID) {
   1370			cid_list = &info->compatible_id_list;
   1371			for (i = 0; i < cid_list->count; i++)
   1372				acpi_add_id(pnp, cid_list->ids[i].string);
   1373		}
   1374		if (info->valid & ACPI_VALID_ADR) {
   1375			pnp->bus_address = info->address;
   1376			pnp->type.bus_address = 1;
   1377		}
   1378		if (info->valid & ACPI_VALID_UID)
   1379			pnp->unique_id = kstrdup(info->unique_id.string,
   1380							GFP_KERNEL);
   1381		if (info->valid & ACPI_VALID_CLS)
   1382			acpi_add_id(pnp, info->class_code.string);
   1383
   1384		kfree(info);
   1385
   1386		/*
   1387		 * Some devices don't reliably have _HIDs & _CIDs, so add
   1388		 * synthetic HIDs to make sure drivers can find them.
   1389		 */
   1390		if (acpi_is_video_device(handle))
   1391			acpi_add_id(pnp, ACPI_VIDEO_HID);
   1392		else if (acpi_bay_match(handle))
   1393			acpi_add_id(pnp, ACPI_BAY_HID);
   1394		else if (acpi_dock_match(handle))
   1395			acpi_add_id(pnp, ACPI_DOCK_HID);
   1396		else if (acpi_ibm_smbus_match(handle))
   1397			acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
   1398		else if (list_empty(&pnp->ids) &&
   1399			 acpi_object_is_system_bus(handle)) {
   1400			/* \_SB, \_TZ, LNXSYBUS */
   1401			acpi_add_id(pnp, ACPI_BUS_HID);
   1402			strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
   1403			strcpy(pnp->device_class, ACPI_BUS_CLASS);
   1404		}
   1405
   1406		break;
   1407	case ACPI_BUS_TYPE_POWER:
   1408		acpi_add_id(pnp, ACPI_POWER_HID);
   1409		break;
   1410	case ACPI_BUS_TYPE_PROCESSOR:
   1411		acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
   1412		break;
   1413	case ACPI_BUS_TYPE_THERMAL:
   1414		acpi_add_id(pnp, ACPI_THERMAL_HID);
   1415		break;
   1416	case ACPI_BUS_TYPE_POWER_BUTTON:
   1417		acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
   1418		break;
   1419	case ACPI_BUS_TYPE_SLEEP_BUTTON:
   1420		acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
   1421		break;
   1422	case ACPI_BUS_TYPE_ECDT_EC:
   1423		acpi_add_id(pnp, ACPI_ECDT_HID);
   1424		break;
   1425	}
   1426}
   1427
   1428void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
   1429{
   1430	struct acpi_hardware_id *id, *tmp;
   1431
   1432	list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
   1433		kfree_const(id->id);
   1434		kfree(id);
   1435	}
   1436	kfree(pnp->unique_id);
   1437}
   1438
   1439/**
   1440 * acpi_dma_supported - Check DMA support for the specified device.
   1441 * @adev: The pointer to acpi device
   1442 *
   1443 * Return false if DMA is not supported. Otherwise, return true.
   1444 */
   1445bool acpi_dma_supported(const struct acpi_device *adev)
   1446{
   1447	if (!adev)
   1448		return false;
   1449
   1450	if (adev->flags.cca_seen)
   1451		return true;
   1452
   1453	/*
   1454	 * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
   1455	 * DMA on "Intel platforms".  Presumably that includes all x86 and
   1456	 * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
   1457	 */
   1458	if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
   1459		return true;
   1460
   1461	return false;
   1462}
   1463
   1464/**
   1465 * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
   1466 * @adev: The pointer to acpi device
   1467 *
   1468 * Return enum dev_dma_attr.
   1469 */
   1470enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
   1471{
   1472	if (!acpi_dma_supported(adev))
   1473		return DEV_DMA_NOT_SUPPORTED;
   1474
   1475	if (adev->flags.coherent_dma)
   1476		return DEV_DMA_COHERENT;
   1477	else
   1478		return DEV_DMA_NON_COHERENT;
   1479}
   1480
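/*
 * Illustrative sketch of how a bus driver typically combines the DMA helpers
 * above when it binds a device with an ACPI companion; the function name is
 * a placeholder and error handling is kept minimal.
 */
static int __maybe_unused example_setup_dma(struct device *dev)
{
	enum dev_dma_attr attr = acpi_get_dma_attr(ACPI_COMPANION(dev));

	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;

	return acpi_dma_configure(dev, attr);
}
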
   1481/**
   1482 * acpi_dma_get_range() - Get device DMA parameters.
   1483 *
   1484 * @dev: device to configure
   1485 * @dma_addr: pointer to the device DMA address result
   1486 * @offset: pointer to the DMA offset result
   1487 * @size: pointer to DMA range size result
   1488 *
   1489 * Evaluate DMA regions and return respectively DMA region start, offset
   1490 * and size in dma_addr, offset and size on parsing success; it does not
   1491 * update the passed in values on failure.
   1492 *
   1493 * Return 0 on success, < 0 on failure.
   1494 */
   1495int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
   1496		       u64 *size)
   1497{
   1498	struct acpi_device *adev;
   1499	LIST_HEAD(list);
   1500	struct resource_entry *rentry;
   1501	int ret;
   1502	struct device *dma_dev = dev;
   1503	u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
   1504
   1505	/*
   1506	 * Walk the device tree chasing an ACPI companion with a _DMA
   1507	 * object while we go. Stop if we find a device with an ACPI
   1508	 * companion containing a _DMA method.
   1509	 */
   1510	do {
   1511		adev = ACPI_COMPANION(dma_dev);
   1512		if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
   1513			break;
   1514
   1515		dma_dev = dma_dev->parent;
   1516	} while (dma_dev);
   1517
   1518	if (!dma_dev)
   1519		return -ENODEV;
   1520
   1521	if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
   1522		acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
   1523		return -EINVAL;
   1524	}
   1525
   1526	ret = acpi_dev_get_dma_resources(adev, &list);
   1527	if (ret > 0) {
   1528		list_for_each_entry(rentry, &list, node) {
   1529			if (dma_offset && rentry->offset != dma_offset) {
   1530				ret = -EINVAL;
   1531				dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
   1532				goto out;
   1533			}
   1534			dma_offset = rentry->offset;
   1535
   1536			/* Take lower and upper limits */
   1537			if (rentry->res->start < dma_start)
   1538				dma_start = rentry->res->start;
   1539			if (rentry->res->end > dma_end)
   1540				dma_end = rentry->res->end;
   1541		}
   1542
   1543		if (dma_start >= dma_end) {
   1544			ret = -EINVAL;
   1545			dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
   1546			goto out;
   1547		}
   1548
   1549		*dma_addr = dma_start - dma_offset;
   1550		len = dma_end - dma_start;
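		/*
		 * len + 1 would wrap to 0 if the window spans the entire
		 * 64-bit address space; max() keeps the reported size
		 * non-zero in that case.
		 */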
   1551		*size = max(len, len + 1);
   1552		*offset = dma_offset;
   1553	}
   1554 out:
   1555	acpi_dev_free_resource_list(&list);
   1556
   1557	return ret >= 0 ? 0 : ret;
   1558}
   1559
   1560#ifdef CONFIG_IOMMU_API
   1561int acpi_iommu_fwspec_init(struct device *dev, u32 id,
   1562			   struct fwnode_handle *fwnode,
   1563			   const struct iommu_ops *ops)
   1564{
   1565	int ret = iommu_fwspec_init(dev, fwnode, ops);
   1566
   1567	if (!ret)
   1568		ret = iommu_fwspec_add_ids(dev, &id, 1);
   1569
   1570	return ret;
   1571}
   1572
   1573static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
   1574{
   1575	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
   1576
   1577	return fwspec ? fwspec->ops : NULL;
   1578}
   1579
   1580static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
   1581						       const u32 *id_in)
   1582{
   1583	int err;
   1584	const struct iommu_ops *ops;
   1585
   1586	/*
   1587	 * If we already translated the fwspec there is nothing left to do,
   1588	 * return the iommu_ops.
   1589	 */
   1590	ops = acpi_iommu_fwspec_ops(dev);
   1591	if (ops)
   1592		return ops;
   1593
   1594	err = iort_iommu_configure_id(dev, id_in);
   1595	if (err && err != -EPROBE_DEFER)
   1596		err = viot_iommu_configure(dev);
   1597
   1598	/*
   1599	 * If we have reason to believe the IOMMU driver missed the initial
   1600	 * iommu_probe_device() call for dev, replay it to get things in order.
   1601	 */
   1602	if (!err && dev->bus && !device_iommu_mapped(dev))
   1603		err = iommu_probe_device(dev);
   1604
   1605	/* Ignore all other errors apart from EPROBE_DEFER */
   1606	if (err == -EPROBE_DEFER) {
   1607		return ERR_PTR(err);
   1608	} else if (err) {
   1609		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
   1610		return NULL;
   1611	}
   1612	return acpi_iommu_fwspec_ops(dev);
   1613}
   1614
   1615#else /* !CONFIG_IOMMU_API */
   1616
   1617int acpi_iommu_fwspec_init(struct device *dev, u32 id,
   1618			   struct fwnode_handle *fwnode,
   1619			   const struct iommu_ops *ops)
   1620{
   1621	return -ENODEV;
   1622}
   1623
   1624static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
   1625						       const u32 *id_in)
   1626{
   1627	return NULL;
   1628}
   1629
   1630#endif /* !CONFIG_IOMMU_API */
   1631
   1632/**
   1633 * acpi_dma_configure_id - Set-up DMA configuration for the device.
   1634 * @dev: The pointer to the device
   1635 * @attr: device dma attributes
   1636 * @input_id: input device id const value pointer
   1637 */
   1638int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
   1639			  const u32 *input_id)
   1640{
   1641	const struct iommu_ops *iommu;
   1642	u64 dma_addr = 0, size = 0;
   1643
   1644	if (attr == DEV_DMA_NOT_SUPPORTED) {
   1645		set_dma_ops(dev, &dma_dummy_ops);
   1646		return 0;
   1647	}
   1648
   1649	acpi_arch_dma_setup(dev, &dma_addr, &size);
   1650
   1651	iommu = acpi_iommu_configure_id(dev, input_id);
   1652	if (PTR_ERR(iommu) == -EPROBE_DEFER)
   1653		return -EPROBE_DEFER;
   1654
   1655	arch_setup_dma_ops(dev, dma_addr, size,
   1656				iommu, attr == DEV_DMA_COHERENT);
   1657
   1658	return 0;
   1659}
   1660EXPORT_SYMBOL_GPL(acpi_dma_configure_id);
   1661
   1662static void acpi_init_coherency(struct acpi_device *adev)
   1663{
   1664	unsigned long long cca = 0;
   1665	acpi_status status;
   1666	struct acpi_device *parent = adev->parent;
   1667
   1668	if (parent && parent->flags.cca_seen) {
   1669		/*
   1670		 * From ACPI spec, OSPM will ignore _CCA if an ancestor
   1671		 * already saw one.
   1672		 */
   1673		adev->flags.cca_seen = 1;
   1674		cca = parent->flags.coherent_dma;
   1675	} else {
   1676		status = acpi_evaluate_integer(adev->handle, "_CCA",
   1677					       NULL, &cca);
   1678		if (ACPI_SUCCESS(status))
   1679			adev->flags.cca_seen = 1;
   1680		else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
   1681			/*
   1682			 * If architecture does not specify that _CCA is
   1683			 * required for DMA-able devices (e.g. x86),
   1684			 * we default to _CCA=1.
   1685			 */
   1686			cca = 1;
   1687		else
   1688			acpi_handle_debug(adev->handle,
   1689					  "ACPI device is missing _CCA.\n");
   1690	}
   1691
   1692	adev->flags.coherent_dma = cca;
   1693}
   1694
   1695static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
   1696{
   1697	bool *is_serial_bus_slave_p = data;
   1698
   1699	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
   1700		return 1;
   1701
   1702	*is_serial_bus_slave_p = true;
   1703
   1704	/* No need to do more checking. */
   1705	return -1;
   1706}
   1707
   1708static bool acpi_is_indirect_io_slave(struct acpi_device *device)
   1709{
   1710	struct acpi_device *parent = device->parent;
   1711	static const struct acpi_device_id indirect_io_hosts[] = {
   1712		{"HISI0191", 0},
   1713		{}
   1714	};
   1715
   1716	return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
   1717}
   1718
   1719static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
   1720{
   1721	struct list_head resource_list;
   1722	bool is_serial_bus_slave = false;
   1723	static const struct acpi_device_id ignore_serial_bus_ids[] = {
   1724	/*
   1725	 * These devices have multiple SerialBus resources and a client
   1726	 * device must be instantiated for each of them, each with
   1727	 * its own device id.
   1728	 * Normally we only instantiate one client device for the first
   1729	 * resource, using the ACPI HID as id. These special cases are handled
   1730	 * by the drivers/platform/x86/serial-multi-instantiate.c driver, which
   1731	 * knows which client device id to use for each resource.
   1732	 */
   1733		{"BSG1160", },
   1734		{"BSG2150", },
   1735		{"CSC3551", },
   1736		{"INT33FE", },
   1737		{"INT3515", },
   1738		/* Non-conforming _HID for Cirrus Logic already released */
   1739		{"CLSA0100", },
   1740	/*
    1741	 * Some ACPI devices contain SerialBus resources even though they are not
   1742	 * attached to a serial bus at all.
   1743	 */
   1744		{"MSHW0028", },
   1745	/*
    1746	 * HIDs of devices with a UartSerialBusV2 resource for which userspace
    1747	 * expects a regular tty cdev to be created (instead of the in-kernel
    1748	 * serdev) and which have a kernel driver that expects a platform_dev,
    1749	 * such as the rfkill-gpio driver.
   1750	 */
   1751		{"BCM4752", },
   1752		{"LNV4752", },
   1753		{}
   1754	};
   1755
   1756	if (acpi_is_indirect_io_slave(device))
   1757		return true;
   1758
   1759	/* Macs use device properties in lieu of _CRS resources */
   1760	if (x86_apple_machine &&
   1761	    (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
   1762	     fwnode_property_present(&device->fwnode, "i2cAddress") ||
   1763	     fwnode_property_present(&device->fwnode, "baud")))
   1764		return true;
   1765
   1766	if (!acpi_match_device_ids(device, ignore_serial_bus_ids))
   1767		return false;
   1768
   1769	INIT_LIST_HEAD(&resource_list);
   1770	acpi_dev_get_resources(device, &resource_list,
   1771			       acpi_check_serial_bus_slave,
   1772			       &is_serial_bus_slave);
   1773	acpi_dev_free_resource_list(&resource_list);
   1774
   1775	return is_serial_bus_slave;
   1776}
   1777
   1778void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
   1779			     int type)
   1780{
   1781	INIT_LIST_HEAD(&device->pnp.ids);
   1782	device->device_type = type;
   1783	device->handle = handle;
   1784	device->parent = acpi_bus_get_parent(handle);
   1785	fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
   1786	acpi_set_device_status(device, ACPI_STA_DEFAULT);
   1787	acpi_device_get_busid(device);
   1788	acpi_set_pnp_ids(handle, &device->pnp, type);
   1789	acpi_init_properties(device);
   1790	acpi_bus_get_flags(device);
   1791	device->flags.match_driver = false;
   1792	device->flags.initialized = true;
   1793	device->flags.enumeration_by_parent =
   1794		acpi_device_enumeration_by_parent(device);
   1795	acpi_device_clear_enumerated(device);
   1796	device_initialize(&device->dev);
   1797	dev_set_uevent_suppress(&device->dev, true);
   1798	acpi_init_coherency(device);
   1799}
   1800
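        /*
         * Count the unmet suppliers recorded on acpi_dep_list for this device
         * and note whether any of them must be honored before enumeration.
         * Called under acpi_dep_list_lock (see acpi_add_single_object()).
         */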
   1801static void acpi_scan_dep_init(struct acpi_device *adev)
   1802{
   1803	struct acpi_dep_data *dep;
   1804
   1805	list_for_each_entry(dep, &acpi_dep_list, node) {
   1806		if (dep->consumer == adev->handle) {
   1807			if (dep->honor_dep)
   1808				adev->flags.honor_deps = 1;
   1809
   1810			adev->dep_unmet++;
   1811		}
   1812	}
   1813}
   1814
   1815void acpi_device_add_finalize(struct acpi_device *device)
   1816{
   1817	dev_set_uevent_suppress(&device->dev, false);
   1818	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
   1819}
   1820
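        /*
         * Initialize the device status; if it cannot be read, treat the
         * device as not present.
         */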
   1821static void acpi_scan_init_status(struct acpi_device *adev)
   1822{
   1823	if (acpi_bus_get_status(adev))
   1824		acpi_set_device_status(adev, 0);
   1825}
   1826
   1827static int acpi_add_single_object(struct acpi_device **child,
   1828				  acpi_handle handle, int type, bool dep_init)
   1829{
   1830	struct acpi_device *device;
   1831	bool release_dep_lock = false;
   1832	int result;
   1833
   1834	device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
   1835	if (!device)
   1836		return -ENOMEM;
   1837
   1838	acpi_init_device_object(device, handle, type);
   1839	/*
   1840	 * Getting the status is delayed till here so that we can call
   1841	 * acpi_bus_get_status() and use its quirk handling.  Note that
   1842	 * this must be done before the get power-/wakeup_dev-flags calls.
   1843	 */
   1844	if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
   1845		if (dep_init) {
   1846			mutex_lock(&acpi_dep_list_lock);
   1847			/*
   1848			 * Hold the lock until the acpi_tie_acpi_dev() call
   1849			 * below to prevent concurrent acpi_scan_clear_dep()
   1850			 * from deleting a dependency list entry without
   1851			 * updating dep_unmet for the device.
   1852			 */
   1853			release_dep_lock = true;
   1854			acpi_scan_dep_init(device);
   1855		}
   1856		acpi_scan_init_status(device);
   1857	}
   1858
   1859	acpi_bus_get_power_flags(device);
   1860	acpi_bus_get_wakeup_device_flags(device);
   1861
   1862	result = acpi_tie_acpi_dev(device);
   1863
   1864	if (release_dep_lock)
   1865		mutex_unlock(&acpi_dep_list_lock);
   1866
   1867	if (!result)
   1868		result = __acpi_device_add(device, acpi_device_release);
   1869
   1870	if (result) {
   1871		acpi_device_release(&device->dev);
   1872		return result;
   1873	}
   1874
   1875	acpi_power_add_remove_device(device, true);
   1876	acpi_device_add_finalize(device);
   1877
   1878	acpi_handle_debug(handle, "Added as %s, parent %s\n",
   1879			  dev_name(&device->dev), device->parent ?
   1880				dev_name(&device->parent->dev) : "(null)");
   1881
   1882	*child = device;
   1883	return 0;
   1884}
   1885
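        /*
         * acpi_walk_resources() callback: copy the first memory resource found
         * into the struct resource passed as @context and terminate the walk.
         */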
   1886static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
   1887					    void *context)
   1888{
   1889	struct resource *res = context;
   1890
   1891	if (acpi_dev_resource_memory(ares, res))
   1892		return AE_CTRL_TERMINATE;
   1893
   1894	return AE_OK;
   1895}
   1896
   1897static bool acpi_device_should_be_hidden(acpi_handle handle)
   1898{
   1899	acpi_status status;
   1900	struct resource res;
   1901
    1902	/* Check whether the UART device should be ignored */
   1903	if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
   1904		return false;
   1905
   1906	/*
    1907	 * The UART device described in the SPCR table is assumed to have only one
   1908	 * memory resource present. So we only look for the first one here.
   1909	 */
   1910	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
   1911				     acpi_get_resource_memory, &res);
   1912	if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
   1913		return false;
   1914
   1915	acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
   1916			 &res.start);
   1917
   1918	return true;
   1919}
   1920
   1921bool acpi_device_is_present(const struct acpi_device *adev)
   1922{
   1923	return adev->status.present || adev->status.functional;
   1924}
   1925
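        /*
         * Match @idstr against the handler's ID table, or against its custom
         * ->match() callback when one is provided.
         */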
   1926static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
   1927				       const char *idstr,
   1928				       const struct acpi_device_id **matchid)
   1929{
   1930	const struct acpi_device_id *devid;
   1931
   1932	if (handler->match)
   1933		return handler->match(idstr, matchid);
   1934
   1935	for (devid = handler->ids; devid->id[0]; devid++)
   1936		if (!strcmp((char *)devid->id, idstr)) {
   1937			if (matchid)
   1938				*matchid = devid;
   1939
   1940			return true;
   1941		}
   1942
   1943	return false;
   1944}
   1945
   1946static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
   1947					const struct acpi_device_id **matchid)
   1948{
   1949	struct acpi_scan_handler *handler;
   1950
   1951	list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
   1952		if (acpi_scan_handler_matching(handler, idstr, matchid))
   1953			return handler;
   1954
   1955	return NULL;
   1956}
   1957
   1958void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
   1959{
   1960	if (!!hotplug->enabled == !!val)
   1961		return;
   1962
   1963	mutex_lock(&acpi_scan_lock);
   1964
   1965	hotplug->enabled = val;
   1966
   1967	mutex_unlock(&acpi_scan_lock);
   1968}
   1969
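        /*
         * Register docks and ejectable bays, and mark devices whose hardware
         * IDs match a scan handler so that they receive hotplug notifications.
         */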
   1970static void acpi_scan_init_hotplug(struct acpi_device *adev)
   1971{
   1972	struct acpi_hardware_id *hwid;
   1973
   1974	if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
   1975		acpi_dock_add(adev);
   1976		return;
   1977	}
   1978	list_for_each_entry(hwid, &adev->pnp.ids, list) {
   1979		struct acpi_scan_handler *handler;
   1980
   1981		handler = acpi_scan_match_handler(hwid->id, NULL);
   1982		if (handler) {
   1983			adev->flags.hotplug_notify = true;
   1984			break;
   1985		}
   1986	}
   1987}
   1988
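        /*
         * Evaluate _DEP and record the device's unmet suppliers on
         * acpi_dep_list.  Returns the number of entries added; a nonzero value
         * means enumeration of the device should be deferred.
         */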
   1989static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
   1990{
   1991	struct acpi_handle_list dep_devices;
   1992	acpi_status status;
   1993	u32 count;
   1994	int i;
   1995
   1996	/*
   1997	 * Check for _HID here to avoid deferring the enumeration of:
   1998	 * 1. PCI devices.
   1999	 * 2. ACPI nodes describing USB ports.
    2000	 * Still, checking for _HID catches more than just these cases ...
   2001	 */
   2002	if (!check_dep || !acpi_has_method(handle, "_DEP") ||
   2003	    !acpi_has_method(handle, "_HID"))
   2004		return 0;
   2005
   2006	status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
   2007	if (ACPI_FAILURE(status)) {
   2008		acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
   2009		return 0;
   2010	}
   2011
   2012	for (count = 0, i = 0; i < dep_devices.count; i++) {
   2013		struct acpi_device_info *info;
   2014		struct acpi_dep_data *dep;
   2015		bool skip, honor_dep;
   2016
   2017		status = acpi_get_object_info(dep_devices.handles[i], &info);
   2018		if (ACPI_FAILURE(status)) {
   2019			acpi_handle_debug(handle, "Error reading _DEP device info\n");
   2020			continue;
   2021		}
   2022
   2023		skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids);
   2024		honor_dep = acpi_info_matches_ids(info, acpi_honor_dep_ids);
   2025		kfree(info);
   2026
   2027		if (skip)
   2028			continue;
   2029
   2030		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
   2031		if (!dep)
   2032			continue;
   2033
   2034		count++;
   2035
   2036		dep->supplier = dep_devices.handles[i];
   2037		dep->consumer = handle;
   2038		dep->honor_dep = honor_dep;
   2039
   2040		mutex_lock(&acpi_dep_list_lock);
    2041		list_add_tail(&dep->node, &acpi_dep_list);
   2042		mutex_unlock(&acpi_dep_list_lock);
   2043	}
   2044
   2045	return count;
   2046}
   2047
   2048static bool acpi_bus_scan_second_pass;
   2049
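        /*
         * Namespace walk callback: create a struct acpi_device for @handle if
         * one does not exist yet.  Subtrees whose root has unmet dependencies
         * are skipped (AE_CTRL_DEPTH) and revisited in the second scan pass.
         */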
   2050static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
   2051				      struct acpi_device **adev_p)
   2052{
   2053	struct acpi_device *device = acpi_fetch_acpi_dev(handle);
   2054	acpi_object_type acpi_type;
   2055	int type;
   2056
   2057	if (device)
   2058		goto out;
   2059
   2060	if (ACPI_FAILURE(acpi_get_type(handle, &acpi_type)))
   2061		return AE_OK;
   2062
   2063	switch (acpi_type) {
   2064	case ACPI_TYPE_DEVICE:
   2065		if (acpi_device_should_be_hidden(handle))
   2066			return AE_OK;
   2067
   2068		/* Bail out if there are dependencies. */
   2069		if (acpi_scan_check_dep(handle, check_dep) > 0) {
   2070			acpi_bus_scan_second_pass = true;
   2071			return AE_CTRL_DEPTH;
   2072		}
   2073
   2074		fallthrough;
   2075	case ACPI_TYPE_ANY:	/* for ACPI_ROOT_OBJECT */
   2076		type = ACPI_BUS_TYPE_DEVICE;
   2077		break;
   2078
   2079	case ACPI_TYPE_PROCESSOR:
   2080		type = ACPI_BUS_TYPE_PROCESSOR;
   2081		break;
   2082
   2083	case ACPI_TYPE_THERMAL:
   2084		type = ACPI_BUS_TYPE_THERMAL;
   2085		break;
   2086
   2087	case ACPI_TYPE_POWER:
   2088		acpi_add_power_resource(handle);
   2089		fallthrough;
   2090	default:
   2091		return AE_OK;
   2092	}
   2093
   2094	/*
   2095	 * If check_dep is true at this point, the device has no dependencies,
   2096	 * or the creation of the device object would have been postponed above.
   2097	 */
   2098	acpi_add_single_object(&device, handle, type, !check_dep);
   2099	if (!device)
   2100		return AE_CTRL_DEPTH;
   2101
   2102	acpi_scan_init_hotplug(device);
   2103
   2104out:
   2105	if (!*adev_p)
   2106		*adev_p = device;
   2107
   2108	return AE_OK;
   2109}
   2110
   2111static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used,
   2112					void *not_used, void **ret_p)
   2113{
   2114	return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p);
   2115}
   2116
   2117static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used,
   2118					void *not_used, void **ret_p)
   2119{
   2120	return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p);
   2121}
   2122
   2123static void acpi_default_enumeration(struct acpi_device *device)
   2124{
   2125	/*
   2126	 * Do not enumerate devices with enumeration_by_parent flag set as
   2127	 * they will be enumerated by their respective parents.
   2128	 */
   2129	if (!device->flags.enumeration_by_parent) {
   2130		acpi_create_platform_device(device, NULL);
   2131		acpi_device_set_enumerated(device);
   2132	} else {
   2133		blocking_notifier_call_chain(&acpi_reconfig_chain,
   2134					     ACPI_RECONFIG_DEVICE_ADD, device);
   2135	}
   2136}
   2137
   2138static const struct acpi_device_id generic_device_ids[] = {
   2139	{ACPI_DT_NAMESPACE_HID, },
   2140	{"", },
   2141};
   2142
   2143static int acpi_generic_device_attach(struct acpi_device *adev,
   2144				      const struct acpi_device_id *not_used)
   2145{
   2146	/*
   2147	 * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test
   2148	 * below can be unconditional.
   2149	 */
   2150	if (adev->data.of_compatible)
   2151		acpi_default_enumeration(adev);
   2152
   2153	return 1;
   2154}
   2155
   2156static struct acpi_scan_handler generic_device_handler = {
   2157	.ids = generic_device_ids,
   2158	.attach = acpi_generic_device_attach,
   2159};
   2160
   2161static int acpi_scan_attach_handler(struct acpi_device *device)
   2162{
   2163	struct acpi_hardware_id *hwid;
   2164	int ret = 0;
   2165
   2166	list_for_each_entry(hwid, &device->pnp.ids, list) {
   2167		const struct acpi_device_id *devid;
   2168		struct acpi_scan_handler *handler;
   2169
   2170		handler = acpi_scan_match_handler(hwid->id, &devid);
   2171		if (handler) {
   2172			if (!handler->attach) {
   2173				device->pnp.type.platform_id = 0;
   2174				continue;
   2175			}
   2176			device->handler = handler;
   2177			ret = handler->attach(device, devid);
   2178			if (ret > 0)
   2179				break;
   2180
   2181			device->handler = NULL;
   2182			if (ret < 0)
   2183				break;
   2184		}
   2185	}
   2186
   2187	return ret;
   2188}
   2189
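        /*
         * Attach a scan handler or a device driver to @device and recurse over
         * its children.  On the second pass, devices that were already visited
         * are only descended into, without being reprobed.
         */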
   2190static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
   2191{
   2192	struct acpi_device *child;
   2193	bool skip = !first_pass && device->flags.visited;
   2194	acpi_handle ejd;
   2195	int ret;
   2196
   2197	if (skip)
   2198		goto ok;
   2199
   2200	if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
   2201		register_dock_dependent_device(device, ejd);
   2202
   2203	acpi_bus_get_status(device);
   2204	/* Skip devices that are not ready for enumeration (e.g. not present) */
   2205	if (!acpi_dev_ready_for_enumeration(device)) {
   2206		device->flags.initialized = false;
   2207		acpi_device_clear_enumerated(device);
   2208		device->flags.power_manageable = 0;
   2209		return;
   2210	}
   2211	if (device->handler)
   2212		goto ok;
   2213
   2214	if (!device->flags.initialized) {
   2215		device->flags.power_manageable =
   2216			device->power.states[ACPI_STATE_D0].flags.valid;
   2217		if (acpi_bus_init_power(device))
   2218			device->flags.power_manageable = 0;
   2219
   2220		device->flags.initialized = true;
   2221	} else if (device->flags.visited) {
   2222		goto ok;
   2223	}
   2224
   2225	ret = acpi_scan_attach_handler(device);
   2226	if (ret < 0)
   2227		return;
   2228
   2229	device->flags.match_driver = true;
   2230	if (ret > 0 && !device->flags.enumeration_by_parent) {
   2231		acpi_device_set_enumerated(device);
   2232		goto ok;
   2233	}
   2234
   2235	ret = device_attach(&device->dev);
   2236	if (ret < 0)
   2237		return;
   2238
   2239	if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
   2240		acpi_default_enumeration(device);
   2241	else
   2242		acpi_device_set_enumerated(device);
   2243
   2244 ok:
   2245	list_for_each_entry(child, &device->children, node)
   2246		acpi_bus_attach(child, first_pass);
   2247
   2248	if (!skip && device->handler && device->handler->hotplug.notify_online)
   2249		device->handler->hotplug.notify_online(device);
   2250}
   2251
   2252static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
   2253{
   2254	struct acpi_device *adev;
   2255
   2256	adev = acpi_bus_get_acpi_device(dep->consumer);
   2257	if (adev) {
   2258		*(struct acpi_device **)data = adev;
   2259		return 1;
   2260	}
   2261	/* Continue parsing if the device object is not present. */
   2262	return 0;
   2263}
   2264
   2265struct acpi_scan_clear_dep_work {
   2266	struct work_struct work;
   2267	struct acpi_device *adev;
   2268};
   2269
   2270static void acpi_scan_clear_dep_fn(struct work_struct *work)
   2271{
   2272	struct acpi_scan_clear_dep_work *cdw;
   2273
   2274	cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
   2275
   2276	acpi_scan_lock_acquire();
   2277	acpi_bus_attach(cdw->adev, true);
   2278	acpi_scan_lock_release();
   2279
   2280	acpi_dev_put(cdw->adev);
   2281	kfree(cdw);
   2282}
   2283
   2284static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
   2285{
   2286	struct acpi_scan_clear_dep_work *cdw;
   2287
   2288	if (adev->dep_unmet)
   2289		return false;
   2290
   2291	cdw = kmalloc(sizeof(*cdw), GFP_KERNEL);
   2292	if (!cdw)
   2293		return false;
   2294
   2295	cdw->adev = adev;
   2296	INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn);
   2297	/*
   2298	 * Since the work function may block on the lock until the entire
   2299	 * initial enumeration of devices is complete, put it into the unbound
   2300	 * workqueue.
   2301	 */
   2302	queue_work(system_unbound_wq, &cdw->work);
   2303
   2304	return true;
   2305}
   2306
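        /*
         * acpi_walk_dep_device_list() callback: drop one dependency of the
         * consumer and, once none are left, queue it for enumeration.
         */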
   2307static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
   2308{
   2309	struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
   2310
   2311	if (adev) {
   2312		adev->dep_unmet--;
   2313		if (!acpi_scan_clear_dep_queue(adev))
   2314			acpi_dev_put(adev);
   2315	}
   2316
   2317	list_del(&dep->node);
   2318	kfree(dep);
   2319
   2320	return 0;
   2321}
   2322
   2323/**
   2324 * acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list
   2325 * @handle:	The ACPI handle of the supplier device
   2326 * @callback:	Pointer to the callback function to apply
   2327 * @data:	Pointer to some data to pass to the callback
   2328 *
   2329 * The return value of the callback determines this function's behaviour. If 0
   2330 * is returned we continue to iterate over acpi_dep_list. If a positive value
   2331 * is returned then the loop is broken but this function returns 0. If a
   2332 * negative value is returned by the callback then the loop is broken and that
   2333 * value is returned as the final error.
   2334 */
   2335static int acpi_walk_dep_device_list(acpi_handle handle,
   2336				int (*callback)(struct acpi_dep_data *, void *),
   2337				void *data)
   2338{
   2339	struct acpi_dep_data *dep, *tmp;
   2340	int ret = 0;
   2341
   2342	mutex_lock(&acpi_dep_list_lock);
   2343	list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
   2344		if (dep->supplier == handle) {
   2345			ret = callback(dep, data);
   2346			if (ret)
   2347				break;
   2348		}
   2349	}
   2350	mutex_unlock(&acpi_dep_list_lock);
   2351
   2352	return ret > 0 ? 0 : ret;
   2353}
   2354
   2355/**
   2356 * acpi_dev_clear_dependencies - Inform consumers that the device is now active
   2357 * @supplier: Pointer to the supplier &struct acpi_device
   2358 *
   2359 * Clear dependencies on the given device.
   2360 */
   2361void acpi_dev_clear_dependencies(struct acpi_device *supplier)
   2362{
   2363	acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL);
   2364}
   2365EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
   2366
   2367/**
   2368 * acpi_dev_ready_for_enumeration - Check if the ACPI device is ready for enumeration
   2369 * @device: Pointer to the &struct acpi_device to check
   2370 *
   2371 * Check if the device is present and has no unmet dependencies.
   2372 *
    2373 * Return true if the device is ready for enumeration. Otherwise, return false.
   2374 */
   2375bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
   2376{
   2377	if (device->flags.honor_deps && device->dep_unmet)
   2378		return false;
   2379
   2380	return acpi_device_is_present(device);
   2381}
   2382EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
   2383
   2384/**
   2385 * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
   2386 * @supplier: Pointer to the dependee device
   2387 *
   2388 * Returns the first &struct acpi_device which declares itself dependent on
   2389 * @supplier via the _DEP buffer, parsed from the acpi_dep_list.
   2390 *
   2391 * The caller is responsible for putting the reference to adev when it is no
   2392 * longer needed.
   2393 */
   2394struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
   2395{
   2396	struct acpi_device *adev = NULL;
   2397
   2398	acpi_walk_dep_device_list(supplier->handle,
   2399				  acpi_dev_get_first_consumer_dev_cb, &adev);
   2400
   2401	return adev;
   2402}
   2403EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
   2404
   2405/**
   2406 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
   2407 * @handle: Root of the namespace scope to scan.
   2408 *
   2409 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
   2410 * found devices.
   2411 *
    2412 * If no devices were found, -ENODEV is returned, but this does not mean that
    2413 * a real error has occurred.  It simply means that no suitable ACPI objects
    2414 * were found in the scanned namespace scope from which the kernel could
    2415 * create a device and add an appropriate driver.
   2416 *
   2417 * Must be called under acpi_scan_lock.
   2418 */
   2419int acpi_bus_scan(acpi_handle handle)
   2420{
   2421	struct acpi_device *device = NULL;
   2422
   2423	acpi_bus_scan_second_pass = false;
   2424
   2425	/* Pass 1: Avoid enumerating devices with missing dependencies. */
   2426
   2427	if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
   2428		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
   2429				    acpi_bus_check_add_1, NULL, NULL,
   2430				    (void **)&device);
   2431
   2432	if (!device)
   2433		return -ENODEV;
   2434
   2435	acpi_bus_attach(device, true);
   2436
   2437	if (!acpi_bus_scan_second_pass)
   2438		return 0;
   2439
   2440	/* Pass 2: Enumerate all of the remaining devices. */
   2441
   2442	device = NULL;
   2443
   2444	if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device)))
   2445		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
   2446				    acpi_bus_check_add_2, NULL, NULL,
   2447				    (void **)&device);
   2448
   2449	acpi_bus_attach(device, false);
   2450
   2451	return 0;
   2452}
   2453EXPORT_SYMBOL(acpi_bus_scan);
   2454
   2455/**
   2456 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
   2457 * @adev: Root of the ACPI namespace scope to walk.
   2458 *
   2459 * Must be called under acpi_scan_lock.
   2460 */
   2461void acpi_bus_trim(struct acpi_device *adev)
   2462{
   2463	struct acpi_scan_handler *handler = adev->handler;
   2464	struct acpi_device *child;
   2465
   2466	list_for_each_entry_reverse(child, &adev->children, node)
   2467		acpi_bus_trim(child);
   2468
   2469	adev->flags.match_driver = false;
   2470	if (handler) {
   2471		if (handler->detach)
   2472			handler->detach(adev);
   2473
   2474		adev->handler = NULL;
   2475	} else {
   2476		device_release_driver(&adev->dev);
   2477	}
   2478	/*
   2479	 * Most likely, the device is going away, so put it into D3cold before
   2480	 * that.
   2481	 */
   2482	acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
   2483	adev->flags.initialized = false;
   2484	acpi_device_clear_enumerated(adev);
   2485}
   2486EXPORT_SYMBOL_GPL(acpi_bus_trim);
   2487
   2488int acpi_bus_register_early_device(int type)
   2489{
   2490	struct acpi_device *device = NULL;
   2491	int result;
   2492
   2493	result = acpi_add_single_object(&device, NULL, type, false);
   2494	if (result)
   2495		return result;
   2496
   2497	device->flags.match_driver = true;
   2498	return device_attach(&device->dev);
   2499}
   2500EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
   2501
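        /*
         * Enumerate the fixed-feature power and sleep buttons when the FADT
         * indicates that they are implemented as fixed hardware.
         */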
   2502static void acpi_bus_scan_fixed(void)
   2503{
   2504	if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
   2505		struct acpi_device *adev = NULL;
   2506
   2507		acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON,
   2508				       false);
   2509		if (adev) {
   2510			adev->flags.match_driver = true;
   2511			if (device_attach(&adev->dev) >= 0)
   2512				device_init_wakeup(&adev->dev, true);
   2513			else
   2514				dev_dbg(&adev->dev, "No driver\n");
   2515		}
   2516	}
   2517
   2518	if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
   2519		struct acpi_device *adev = NULL;
   2520
   2521		acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON,
   2522				       false);
   2523		if (adev) {
   2524			adev->flags.match_driver = true;
   2525			if (device_attach(&adev->dev) < 0)
   2526				dev_dbg(&adev->dev, "No driver\n");
   2527		}
   2528	}
   2529}
   2530
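        /*
         * Cache the address of the UART described by the SPCR table so that
         * acpi_device_should_be_hidden() can match it against _CRS.
         */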
   2531static void __init acpi_get_spcr_uart_addr(void)
   2532{
   2533	acpi_status status;
   2534	struct acpi_table_spcr *spcr_ptr;
   2535
   2536	status = acpi_get_table(ACPI_SIG_SPCR, 0,
   2537				(struct acpi_table_header **)&spcr_ptr);
   2538	if (ACPI_FAILURE(status)) {
   2539		pr_warn("STAO table present, but SPCR is missing\n");
   2540		return;
   2541	}
   2542
   2543	spcr_uart_addr = spcr_ptr->serial_port.address;
   2544	acpi_put_table((struct acpi_table_header *)spcr_ptr);
   2545}
   2546
   2547static bool acpi_scan_initialized;
   2548
   2549void __init acpi_scan_init(void)
   2550{
   2551	acpi_status status;
   2552	struct acpi_table_stao *stao_ptr;
   2553
   2554	acpi_pci_root_init();
   2555	acpi_pci_link_init();
   2556	acpi_processor_init();
   2557	acpi_platform_init();
   2558	acpi_lpss_init();
   2559	acpi_apd_init();
   2560	acpi_cmos_rtc_init();
   2561	acpi_container_init();
   2562	acpi_memory_hotplug_init();
   2563	acpi_watchdog_init();
   2564	acpi_pnp_init();
   2565	acpi_int340x_thermal_init();
   2566	acpi_amba_init();
   2567	acpi_init_lpit();
   2568
   2569	acpi_scan_add_handler(&generic_device_handler);
   2570
   2571	/*
    2572	 * If there is a STAO table, check whether it requires the UART device
    2573	 * described in the SPCR table to be ignored.
   2574	 */
   2575	status = acpi_get_table(ACPI_SIG_STAO, 0,
   2576				(struct acpi_table_header **)&stao_ptr);
   2577	if (ACPI_SUCCESS(status)) {
   2578		if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
   2579			pr_info("STAO Name List not yet supported.\n");
   2580
   2581		if (stao_ptr->ignore_uart)
   2582			acpi_get_spcr_uart_addr();
   2583
   2584		acpi_put_table((struct acpi_table_header *)stao_ptr);
   2585	}
   2586
   2587	acpi_gpe_apply_masked_gpes();
   2588	acpi_update_all_gpes();
   2589
   2590	/*
   2591	 * Although we call __add_memory() that is documented to require the
   2592	 * device_hotplug_lock, it is not necessary here because this is an
   2593	 * early code when userspace or any other code path cannot trigger
   2594	 * hotplug/hotunplug operations.
   2595	 */
   2596	mutex_lock(&acpi_scan_lock);
   2597	/*
   2598	 * Enumerate devices in the ACPI namespace.
   2599	 */
   2600	if (acpi_bus_scan(ACPI_ROOT_OBJECT))
   2601		goto unlock;
   2602
   2603	acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
   2604	if (!acpi_root)
   2605		goto unlock;
   2606
    2607	/* Fixed feature devices do not exist on HW-reduced platforms */
   2608	if (!acpi_gbl_reduced_hardware)
   2609		acpi_bus_scan_fixed();
   2610
   2611	acpi_turn_off_unused_power_resources();
   2612
   2613	acpi_scan_initialized = true;
   2614
   2615unlock:
   2616	mutex_unlock(&acpi_scan_lock);
   2617}
   2618
   2619static struct acpi_probe_entry *ape;
   2620static int acpi_probe_count;
   2621static DEFINE_MUTEX(acpi_probe_mutex);
   2622
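        /*
         * MADT subtable callback: run the probe routine of the acpi_probe_entry
         * currently being processed and count successful probes.
         */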
   2623static int __init acpi_match_madt(union acpi_subtable_headers *header,
   2624				  const unsigned long end)
   2625{
   2626	if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
   2627		if (!ape->probe_subtbl(header, end))
   2628			acpi_probe_count++;
   2629
   2630	return 0;
   2631}
   2632
   2633int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
   2634{
   2635	int count = 0;
   2636
   2637	if (acpi_disabled)
   2638		return 0;
   2639
   2640	mutex_lock(&acpi_probe_mutex);
   2641	for (ape = ap_head; nr; ape++, nr--) {
   2642		if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
   2643			acpi_probe_count = 0;
   2644			acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
   2645			count += acpi_probe_count;
   2646		} else {
   2647			int res;
   2648			res = acpi_table_parse(ape->id, ape->probe_table);
   2649			if (!res)
   2650				count++;
   2651		}
   2652	}
   2653	mutex_unlock(&acpi_probe_mutex);
   2654
   2655	return count;
   2656}
   2657
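        /*
         * Deferred work scheduled by acpi_scan_table_notify(): rescan the
         * namespace so that devices from newly installed tables are enumerated.
         */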
   2658static void acpi_table_events_fn(struct work_struct *work)
   2659{
   2660	acpi_scan_lock_acquire();
   2661	acpi_bus_scan(ACPI_ROOT_OBJECT);
   2662	acpi_scan_lock_release();
   2663
   2664	kfree(work);
   2665}
   2666
   2667void acpi_scan_table_notify(void)
   2668{
   2669	struct work_struct *work;
   2670
   2671	if (!acpi_scan_initialized)
   2672		return;
   2673
   2674	work = kmalloc(sizeof(*work), GFP_KERNEL);
   2675	if (!work)
   2676		return;
   2677
   2678	INIT_WORK(work, acpi_table_events_fn);
   2679	schedule_work(work);
   2680}
   2681
   2682int acpi_reconfig_notifier_register(struct notifier_block *nb)
   2683{
   2684	return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
   2685}
   2686EXPORT_SYMBOL(acpi_reconfig_notifier_register);
   2687
   2688int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
   2689{
   2690	return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
   2691}
   2692EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);