cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iort.c (43829B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

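/*
 * Node-type masks: IORT_TYPE_MASK(node->type) & type_mask is non-zero
 * when a node is one of the requested types, so e.g. IORT_IOMMU_TYPE
 * matches both SMMUv1/v2 and SMMUv3 nodes in a single test.
 */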
#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
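
/*
 * The "domain token" is the fwnode of an ITS MSI domain; in practice it
 * is registered by the GICv3 ITS driver when it probes an ITS from the
 * MADT, keyed by the same translation_id the IORT refers to.
 */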

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
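
/*
 * Worked example of the off-by-1 handling above: a correct map for 256
 * IDs starting at 0x0 has input_base 0x0 and id_count 0xff, but buggy
 * firmware may write id_count 0x100, so ID 0x100 matches both this map
 * and a neighbour whose input_base is 0x100. The first match returns
 * -EAGAIN, and the retried match at the neighbour's input_base wins.
 */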

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			       map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * The SMMUv3 dev ID mapping index was introduced in
		 * revision 1 of the table and is not available in
		 * revision 0.
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * The ID mapping index is only ignored if all interrupts
		 * are GSIV based.
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is the special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
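
/*
 * For example, finding the MSI DeviceID for a PCI device typically
 * walks two hops of this tree: root complex node (input: requester
 * ID) -> SMMU node (input: StreamID) -> ITS group node, with each
 * hop computing output_base + (id - input_base).
 */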

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the parent is not the target type we want,
	 * map the initial dev id again, for use cases such as
	 * NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * Scan iort_fwnode_list to see if the device is an IORT
		 * platform device (such as an SMMU or PMCG); its IORT node
		 * was already cached and associated with the fwnode when
		 * the IORT platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * If not, it should be a platform device defined in the
		 * DSDT/SSDT (with a Named Component node in the IORT).
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map an MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
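
/*
 * For PCI devices the input ID is typically the requester ID, i.e.
 * PCI_DEVID(bus, devfn), and the result is the DeviceID the ITS uses
 * to identify the endpoint for MSI translation.
 */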

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 when a dev id is successfully found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if the platform
 *          doesn't require the reservation or there are no associated
 *          msi regions), appropriate error value otherwise. The ITS
 *          interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 *          associated with the device are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}
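
/*
 * The 64K offset targets the ITS translation register page: the
 * GITS_TRANSLATER doorbell lives in the second 64K page of the ITS
 * frame, which is why (ITS_base + SZ_64K, SZ_64K) is reserved here.
 */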

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, either the SMMU driver has not been
	 * probed yet or it is not built into the kernel. If the driver
	 * is enabled in the kernel configuration, defer the IOMMU
	 * configuration; otherwise just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct property_entry props[3] = {};
	struct acpi_iort_named_component *nc;

	nc = (struct acpi_iort_named_component *)node->node_data;
	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
						nc->node_flags));
	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

	if (device_create_managed_software_node(dev, props, NULL))
		dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_iort_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	return err;
}

#else
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	if (!ncomp->memory_address_limit) {
		pr_warn(FW_BUG "Named component missing memory address limit\n");
		return -EINVAL;
	}

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL<<ncomp->memory_address_limit;

	return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	if (!rc->memory_address_limit) {
		pr_warn(FW_BUG "Root complex missing memory address limit\n");
		return -EINVAL;
	}

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL<<rc->memory_address_limit;

	return 0;
}
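
/*
 * In both helpers above, memory_address_limit is the number of address
 * bits the master can drive, so e.g. a limit of 32 yields a 4 GiB
 * (1ULL << 32) DMA range.
 */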

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @size: DMA range size result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
	if (dev_is_pci(dev))
		return rc_dma_get_range(dev, size);
	else
		return nc_dma_get_range(dev, size);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
								      name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size for the Cavium ThunderX2 implementation,
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}
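
/*
 * A generic SMMUv3 exposes two consecutive 64K register pages (page 1
 * starts at offset 64K), which is where the SZ_128K default comes from.
 */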

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
				     &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	/*
	 * The initial version in DEN0049C lacked a way to describe register
	 * page 1, which makes it broken for most PMCG implementations; in
	 * that case, just let the driver fail gracefully if it expects to
	 * find a second memory resource.
	 */
	if (node->revision > 0) {
		res[1].start = pmcg->page1_base_address;
		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
		res[1].flags = IORESOURCE_MEM;
	}

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				    struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}
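
/*
 * Only SMMU, SMMUv3 and PMCG nodes are backed by platform devices
 * created below; ITS groups are handled by the irqchip layer, while
 * root complex and named component nodes describe devices that are
 * enumerated elsewhere.
 */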

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of the IORT node pointer to platform_data to be used
	 * to retrieve IORT data information.
	 */
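	/*
	 * Note that passing &node stores a copy of the pointer itself;
	 * consumers can read it back as
	 * *(struct acpi_iort_node **)dev_get_platdata(&pdev->dev).
	 */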
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of the IORT table,
	 * but have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	/* iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}

#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;

	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;

		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
	acpi_put_table(&iort->header);
	return limit;
}
#endif