cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pcie-iproc-msi.c (17801B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K

enum iproc_msi_reg {
	IPROC_MSI_EQ_PAGE = 0,
	IPROC_MSI_EQ_PAGE_UPPER,
	IPROC_MSI_PAGE,
	IPROC_MSI_PAGE_UPPER,
	IPROC_MSI_CTRL,
	IPROC_MSI_EQ_HEAD,
	IPROC_MSI_EQ_TAIL,
	IPROC_MSI_INTS_EN,
	IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
	struct iproc_msi *msi;
	int gic_irq;
	unsigned int eq;
};

/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates that the MSI interrupt enable register needs to
 * be set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
	struct iproc_pcie *pcie;
	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
	struct iproc_msi_grp *grps;
	int nr_irqs;
	int nr_cpus;
	bool has_inten_reg;
	unsigned long *bitmap;
	struct mutex bitmap_lock;
	unsigned int nr_msi_vecs;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	unsigned int nr_eq_region;
	unsigned int nr_msi_region;
	void *eq_cpu;
	dma_addr_t eq_dma;
	phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};
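
/*
 * Note: the PAXC table fills only seven of the IPROC_MSI_REG_SIZE slots per
 * row; the trailing IPROC_MSI_INTS_EN offset is left zero and is only
 * accessed when has_inten_reg is set (via the "brcm,pcie-msi-inten" DT
 * property in iproc_msi_init()).
 */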

static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
				     enum iproc_msi_reg reg,
				     unsigned int eq)
{
	struct iproc_pcie *pcie = msi->pcie;

	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
				       enum iproc_msi_reg reg,
				       int eq, u32 val)
{
	struct iproc_pcie *pcie = msi->pcie;

	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
						 unsigned long hwirq)
{
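	/*
	 * Controllers with one MSI address region per group (nr_msi_region
	 * == nr_irqs, e.g. PAXC) space the groups 4K apart; otherwise all
	 * groups share a single region with one u32 slot per group (see the
	 * per-type setup in iproc_msi_init()).
	 */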
	if (msi->nr_msi_region > 1)
		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
	else
		return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
	if (msi->nr_eq_region > 1)
		return eq * EQ_MEM_REGION_SIZE;
	else
		return eq * EQ_LEN * sizeof(u32);
}

static struct irq_chip iproc_msi_irq_chip = {
	.name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		MSI_FLAG_PCI_MSIX,
	.chip = &iproc_msi_irq_chip,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue.  Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs.  The total
 * number of CPU cores also varies.  To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs.  An MSI vector is
 * moved from one GIC interrupt to another in order to steer it to the target
 * CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
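
/*
 * For example, with M = 6 groups and N = 2 CPUs there are 6 * 64 = 384 raw
 * vectors, of which 384 / 2 = 192 are usable: each allocated MSI consumes
 * one raw vector per CPU, and hwirq % nr_cpus gives the CPU it currently
 * targets.
 */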
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
						     unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(msi, hwirq));
}

static int iproc_msi_irq_set_affinity(struct irq_data *data,
				      const struct cpumask *mask, bool force)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	int target_cpu = cpumask_first(mask);
	int curr_cpu;
	int ret;

	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
	if (curr_cpu == target_cpu)
		ret = IRQ_SET_MASK_OK_DONE;
	else {
		/* steer MSI to the target CPU */
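		/*
		 * Changing data->hwirq switches both the MSI address offset
		 * (i.e. the group/GIC interrupt servicing this vector) and
		 * the MSI data; returning IRQ_SET_MASK_OK lets the MSI core
		 * re-compose and re-write the message for the new hwirq.
		 */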
		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) +
			target_cpu;
		ret = IRQ_SET_MASK_OK;
	}

	irq_data_update_effective_affinity(data, cpumask_of(target_cpu));

	return ret;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	dma_addr_t addr;

	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
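	/*
	 * Leave the low 5 bits of the MSI data clear: with PCI multi-MSI the
	 * device may OR the vector index into those bits, and
	 * decode_msi_hwirq() adds them back to recover the hwirq that fired.
	 */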
	msg->data = data->hwirq << 5;
}

static struct irq_chip iproc_msi_bottom_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = iproc_msi_irq_set_affinity,
	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs,
				      void *args)
{
	struct iproc_msi *msi = domain->host_data;
	int hwirq, i;

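	/*
	 * Multi-MSI (nr_irqs > 1) is only advertised on single-CPU systems
	 * (see MSI_FLAG_MULTI_PCI_MSI in iproc_msi_init()), since affinity
	 * steering already consumes the per-CPU sibling hwirqs.
	 */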
	if (msi->nr_cpus > 1 && nr_irqs > 1)
		return -EINVAL;

	mutex_lock(&msi->bitmap_lock);

	/*
	 * Allocate 'nr_cpus' MSI vectors for each requested IRQ, i.e.
	 * 'nr_irqs' * 'nr_cpus' vectors in total.
	 */
	hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
					order_base_2(msi->nr_cpus * nr_irqs));

	mutex_unlock(&msi->bitmap_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &iproc_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return 0;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	unsigned int hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
	bitmap_release_region(msi->bitmap, hwirq,
			      order_base_2(msi->nr_cpus * nr_irqs));

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = iproc_msi_irq_domain_alloc,
	.free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
	u32 __iomem *msg;
	u32 hwirq;
	unsigned int offs;

	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
	msg = (u32 __iomem *)(msi->eq_cpu + offs);
	hwirq = readl(msg);
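	/*
	 * Undo the encoding from iproc_msi_irq_compose_msi_msg(): the upper
	 * bits carry the base hwirq and the low 5 bits may carry a multi-MSI
	 * vector index, so adding the two yields the hwirq that fired.
	 */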
	hwirq = (hwirq >> 5) + (hwirq & 0x1f);

	/*
	 * Since we have multiple hwirqs mapped to a single MSI vector,
	 * now we need to derive the hwirq at CPU0.  It can then be
	 * mapped back to the virq.
	 */
	return hwirq_to_canonical_hwirq(msi, hwirq);
}

static void iproc_msi_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct iproc_msi_grp *grp;
	struct iproc_msi *msi;
	u32 eq, head, tail, nr_events;
	unsigned long hwirq;

	chained_irq_enter(chip, desc);

	grp = irq_desc_get_handler_data(desc);
	msi = grp->msi;
	eq = grp->eq;

	/*
	 * iProc MSI event queue is tracked by head and tail pointers.  Head
	 * pointer indicates the next entry (MSI data) to be consumed by SW in
	 * the queue and needs to be updated by SW.  iProc MSI core uses the
	 * tail pointer as the next data insertion point.
	 *
	 * Entries between head and tail pointers contain valid MSI data.  MSI
	 * data is guaranteed to be in the event queue memory before the tail
	 * pointer is updated by the iProc MSI core.
	 */
	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
				  eq) & IPROC_MSI_EQ_MASK;
	do {
		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
					  eq) & IPROC_MSI_EQ_MASK;

		/*
		 * Figure out total number of events (MSI data) to be
		 * processed.
		 */
		nr_events = (tail < head) ?
			(EQ_LEN - (head - tail)) : (tail - head);
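		/*
		 * E.g. with EQ_LEN = 64, head = 60 and tail = 4,
		 * nr_events = 64 - (60 - 4) = 8 entries are pending across
		 * the wrap-around.
		 */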
		if (!nr_events)
			break;

		/* process all outstanding events */
		while (nr_events--) {
			hwirq = decode_msi_hwirq(msi, eq, head);
			generic_handle_domain_irq(msi->inner_domain, hwirq);

			head++;
			head %= EQ_LEN;
		}

		/*
		 * Now all outstanding events have been processed.  Update the
		 * head pointer.
		 */
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

		/*
		 * Now go read the tail pointer again to see if there are new
		 * outstanding events that came in during the above window.
		 */
	} while (true);

	chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
	int i, eq;
	u32 val;

	/* Program memory region for each event queue */
	for (i = 0; i < msi->nr_eq_region; i++) {
		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	/* Program address region for MSI posted writes */
	for (i = 0; i < msi->nr_msi_region; i++) {
		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		/* Enable MSI event queue */
		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			IPROC_MSI_EQ_EN;
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

		/*
		 * Some legacy platforms require the MSI interrupt enable
		 * register to be set explicitly.
		 */
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val |= BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}
	}
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
	u32 eq, val;

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val &= ~BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}

		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			 IPROC_MSI_EQ_EN);
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
	}
}

static int iproc_msi_alloc_domains(struct device_node *node,
				   struct iproc_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
						    &iproc_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);

	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
	int i;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 NULL, NULL);
	}
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
	int i, ret;
	cpumask_var_t mask;
	struct iproc_pcie *pcie = msi->pcie;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 iproc_msi_handler,
						 &msi->grps[i]);
		/* Dedicate GIC interrupt to each CPU core */
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
			if (ret)
				dev_err(pcie->dev,
					"failed to set affinity for IRQ%d\n",
					msi->grps[i].gic_irq);
			free_cpumask_var(mask);
		} else {
			dev_err(pcie->dev, "failed to alloc CPU mask\n");
			ret = -EINVAL;
		}

		if (ret) {
			/* Free all configured/unconfigured IRQs */
			iproc_msi_irq_free(msi, cpu);
			return ret;
		}
	}

	return 0;
}

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
	struct iproc_msi *msi;
	int i, ret;
	unsigned int cpu;

	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
		return -ENODEV;

	if (!of_find_property(node, "msi-controller", NULL))
		return -ENODEV;

	if (pcie->msi)
		return -EBUSY;

	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	msi->pcie = pcie;
	pcie->msi = msi;
	msi->msi_addr = pcie->base_addr;
	mutex_init(&msi->bitmap_lock);
	msi->nr_cpus = num_possible_cpus();

	if (msi->nr_cpus == 1)
		iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;

	msi->nr_irqs = of_irq_count(node);
	if (!msi->nr_irqs) {
		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
		return -ENODEV;
	}

	if (msi->nr_irqs > NR_HW_IRQS) {
		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
			 msi->nr_irqs);
		msi->nr_irqs = NR_HW_IRQS;
	}

	if (msi->nr_irqs < msi->nr_cpus) {
		dev_err(pcie->dev,
			"not enough GIC interrupts for MSI affinity\n");
		return -EINVAL;
	}

	if (msi->nr_irqs % msi->nr_cpus != 0) {
		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
			 msi->nr_irqs);
	}

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
	case IPROC_PCIE_PAXB:
		msi->reg_offsets = iproc_msi_reg_paxb;
		msi->nr_eq_region = 1;
		msi->nr_msi_region = 1;
		break;
	case IPROC_PCIE_PAXC:
		msi->reg_offsets = iproc_msi_reg_paxc;
		msi->nr_eq_region = msi->nr_irqs;
		msi->nr_msi_region = msi->nr_irqs;
		break;
	default:
		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
		msi->has_inten_reg = true;

	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
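	/*
	 * The bitmap tracks raw vectors: they are handed out in blocks of
	 * nr_cpus consecutive hwirqs, one sibling per CPU, which is what
	 * lets iproc_msi_irq_set_affinity() steer a vector by switching to
	 * the sibling hwirq of the target CPU.
	 */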
	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
				   sizeof(*msi->bitmap), GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
				 GFP_KERNEL);
	if (!msi->grps)
		return -ENOMEM;

	for (i = 0; i < msi->nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		if (!irq) {
			dev_err(pcie->dev, "unable to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}
		msi->grps[i].gic_irq = irq;
		msi->grps[i].msi = msi;
		msi->grps[i].eq = i;
	}

	/* Reserve memory for the event queues and make sure it is zeroed */
	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
					 &msi->eq_dma, GFP_KERNEL);
	if (!msi->eq_cpu) {
		ret = -ENOMEM;
		goto free_irqs;
	}

	ret = iproc_msi_alloc_domains(node, msi);
	if (ret) {
		dev_err(pcie->dev, "failed to create MSI domains\n");
		goto free_eq_dma;
	}

	for_each_online_cpu(cpu) {
		ret = iproc_msi_irq_setup(msi, cpu);
		if (ret)
			goto free_msi_irq;
	}

	iproc_msi_enable(msi);

	return 0;

free_msi_irq:
	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);
	iproc_msi_free_domains(msi);

free_eq_dma:
	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

free_irqs:
	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
	pcie->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
	struct iproc_msi *msi = pcie->msi;
	unsigned int i, cpu;

	if (!msi)
		return;

	iproc_msi_disable(msi);

	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);

	iproc_msi_free_domains(msi);

	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
}
EXPORT_SYMBOL(iproc_msi_exit);