cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pci-acpi.c


// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return -ENODEV;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

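/*
 * What follows implements the ACPI hotplug parameter mechanism: firmware
 * can supply default PCI settings for hot-added devices either via the
 * legacy _HPP method (a flat package of four integers) or via _HPX, which
 * returns a list of typed setting records.  Record Types 0-3 are decoded
 * and programmed below.
 */
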
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

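/*
 * An illustrative (hypothetical) Type 0 record as firmware might return
 * it from _HPX, in the layout decode_type0_hpx_record() expects: six
 * integers -- type, revision, then the four setting fields:
 *
 *	Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *
 * i.e. type 0, revision 1, cache line size 8, latency timer 0x40,
 * SERR enabled, PERR disabled.
 */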
static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision        = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer   = fields[3].integer.value;
		hpx0->enable_serr     = fields[4].integer.value;
		hpx0->enable_perr     = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision      = revision;
		hpx1->max_mem_read  = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

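/*
 * pcie_root_rcb_set - check the Read Completion Boundary of @dev's Root Port
 *
 * A set PCI_EXP_LNKCTL_RCB bit indicates a 128-byte RCB; program_hpx_type2()
 * uses this to mirror the Root Port's setting on downstream devices.
 */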
static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

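/*
 * Each Type 2 field pair below is applied as a read-modify-write:
 *
 *	reg = (reg & *_and) | *_or;
 *
 * so the AND mask selects the bits firmware wants left alone and the OR
 * mask sets the bits firmware wants enabled.
 */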
static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision      = revision;
		hpx2->unc_err_mask_and      = fields[2].integer.value;
		hpx2->unc_err_mask_or       = fields[3].integer.value;
		hpx2->unc_err_sever_and     = fields[4].integer.value;
		hpx2->unc_err_sever_or      = fields[5].integer.value;
		hpx2->cor_err_mask_and      = fields[6].integer.value;
		hpx2->cor_err_mask_or       = fields[7].integer.value;
		hpx2->adv_err_cap_and       = fields[8].integer.value;
		hpx2->adv_err_cap_or        = fields[9].integer.value;
		hpx2->pci_exp_devctl_and    = fields[10].integer.value;
		hpx2->pci_exp_devctl_or     = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

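/*
 * Unlike Types 0-2, a Type 3 record is a generic register-programming
 * descriptor: it names a config space location, an optional match
 * condition (read a register, mask it, compare), and an AND/OR pair to
 * apply to the target register.  A single _HPX Type 3 record may carry
 * several such descriptors.
 */
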
/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

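/*
 * In a Type 3 record, the low nibble of the capability-version field is
 * the version number itself; bit 4 appears to mean "this version or
 * later", which is why a set bit 4 turns the equality test below into a
 * greater-or-equal test.
 */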
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type            = reg_fields[0].integer.value;
	hpx3_reg->function_type          = reg_fields[1].integer.value;
	hpx3_reg->config_space_location  = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
	hpx3_reg->match_offset           = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
	hpx3_reg->match_value            = reg_fields[10].integer.value;
	hpx3_reg->reg_offset             = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					   union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		printk(KERN_WARNING
			"%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

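/*
 * _HPX returns a package of packages.  Element 0 of each sub-package is
 * the record type and element 1 its revision; the meaning of the
 * remaining elements depends on the type, so each record is dispatched
 * to the matching per-type decoder.
 */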
static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
 exit:
	kfree(buffer.pointer);
	return status;
}

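/*
 * _HPP is the pre-_HPX interface: a flat package of exactly four
 * integers.  It carries the same settings as a Type 0 record, so it is
 * mapped onto struct hpx_type0 and programmed the same way.
 */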
static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision        = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer   = fields[1].integer.value;
	hpx0.enable_serr     = fields[2].integer.value;
	hpx0.enable_perr     = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered. If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
		ACPI_COMPANION_SET(&dev->dev,
				   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
	acpi_handle handle = ACPI_HANDLE(&dev->dev);

	if (!handle || !acpi_has_method(handle, "_RST"))
		return -ENOTTY;

	if (probe)
		return 0;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
		pci_warn(dev, "ACPI _RST failed\n");
		return -ENOTTY;
	}

	return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	struct pci_dev *rpdev;
	struct acpi_device *adev;
	acpi_status status;
	unsigned long long state;
	const union acpi_object *obj;

	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
		return false;

	/* Assume D3 support if the bridge is power-manageable by ACPI. */
	if (acpi_pci_power_manageable(dev))
		return true;

	rpdev = pcie_find_root_port(dev);
	if (!rpdev)
		return false;

	adev = ACPI_COMPANION(&rpdev->dev);
	if (!adev)
		return false;

	/*
	 * If the Root Port cannot signal wakeup at all, i.e., it doesn't
	 * supply a wakeup GPE via _PRW, it cannot signal hotplug events
	 * from low-power states including D3hot and D3cold.
	 */
	if (!adev->wakeup.flags.valid)
		return false;

	/*
	 * If the Root Port cannot wake itself from D3hot or D3cold, we
	 * can't use D3.
	 */
	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
	if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
		return false;

	/*
	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
	 * the Port can signal hotplug events while in D3.  We assume any
	 * bridges *below* that Root Port can also signal hotplug events
	 * while in D3.
	 */
	if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj) &&
	    obj->integer.value == 1)
		return true;

	return false;
}

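/*
 * Illustrative (hypothetical) ASL for a Root Port advertising the
 * "HotPlugSupportInD3" property checked by acpi_pci_bridge_d3() above,
 * under the standard device-properties _DSD UUID:
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 },
 *		}
 *	})
 */
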
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		fallthrough;
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		pci_dbg(dev, "power state changed by ACPI to %s\n",
		        acpi_power_state_string(adev->power.state));

	return error;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0]      = PCI_D0,
		[ACPI_STATE_D1]      = PCI_D1,
		[ACPI_STATE_D2]      = PCI_D2,
		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

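/*
 * A device without an ACPI companion of its own cannot be armed for
 * wakeup directly; walk up the bus hierarchy and arm the first bridge
 * that can wake the system on the device's behalf, falling back to the
 * host bridge at the root.
 */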
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pci_disabled)
		return 0;

	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev;

	if (acpi_pci_disabled)
		return false;

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
	 * system-wide suspend/resume confuses the platform firmware, so avoid
	 * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
	 * devices are expected to be in D3 before invoking the S3 entry path
	 * from the firmware, so they should not be affected by this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	adev = ACPI_COMPANION(&dev->dev);
	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */

static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer (must not be NULL).
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
	int ret;

	if (!func)
		return -EINVAL;

	down_write(&pci_acpi_companion_lookup_sem);

	if (pci_acpi_find_companion_hook) {
		ret = -EBUSY;
	} else {
		pci_acpi_find_companion_hook = func;
		ret = 0;
	}

	up_write(&pci_acpi_companion_lookup_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
	down_write(&pci_acpi_companion_lookup_sem);

	pci_acpi_find_companion_hook = NULL;

	up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);

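/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver whose ACPI namespace uses non-standard _ADR encodings would
 * install its own lookup before PCI enumeration and remove it when done:
 *
 *	static struct acpi_device *my_lookup(struct pci_dev *pdev)
 *	{
 *		return NULL;	// map pdev to its ACPI companion here
 *	}
 *
 *	ret = pci_acpi_set_companion_lookup_hook(my_lookup);
 *	...
 *	pci_acpi_clear_companion_lookup_hook();
 */
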
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev;
	bool check_children;
	u64 addr;

	if (!dev->parent)
		return NULL;

	down_read(&pci_acpi_companion_lookup_sem);

	adev = pci_acpi_find_companion_hook ?
		pci_acpi_find_companion_hook(pci_dev) : NULL;

	up_read(&pci_acpi_companion_lookup_sem);

	if (adev)
		return adev;

	check_children = pci_is_bridge(pci_dev);
	/* Per the ACPI spec, _ADR encodes the device in the high word and the function in the low word */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);

	/*
	 * There may be ACPI device objects in the ACPI namespace that are
	 * children of the device object representing the host bridge, but don't
	 * represent PCI devices.  Both _HID and _ADR may be present for them,
	 * even though that is against the specification (for example, see
	 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
	 * appears to indicate that they should not be taken into consideration
	 * as potential companions of PCI devices on the root bus.
	 *
	 * To catch this special case, disregard the returned device object if
	 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
	 * root bus.
	 */
	if (adev && adev->pnp.type.platform_id && !addr &&
	    pci_is_root_bus(pci_dev->bus))
		return NULL;

	return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
	if (!obj)
		return;

	/* Durations in the package are in microseconds; delays are in ms */
	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system.  Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}

void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);

	if (pci_is_bridge(pci_dev))
		acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn:       Callback matching a device to a fwnode that identifies a PCI
 *            MSI domain.
 *
 * This should be called by the irqchip driver, i.e. the parent of the MSI
 * domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus:      The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	if (acpi_pci_disabled)
		return 0;

	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);