cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qcom_iommu.c (23742B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
      4 *
      5 * Copyright (C) 2013 ARM Limited
      6 * Copyright (C) 2017 Red Hat
      7 */
      8
      9#include <linux/atomic.h>
     10#include <linux/bitfield.h>
     11#include <linux/clk.h>
     12#include <linux/delay.h>
     13#include <linux/dma-mapping.h>
     14#include <linux/err.h>
     15#include <linux/interrupt.h>
     16#include <linux/io.h>
     17#include <linux/io-64-nonatomic-hi-lo.h>
     18#include <linux/io-pgtable.h>
     19#include <linux/iommu.h>
     20#include <linux/iopoll.h>
     21#include <linux/kconfig.h>
     22#include <linux/init.h>
     23#include <linux/mutex.h>
     24#include <linux/of.h>
     25#include <linux/of_address.h>
     26#include <linux/of_device.h>
     27#include <linux/platform_device.h>
     28#include <linux/pm.h>
     29#include <linux/pm_runtime.h>
     30#include <linux/qcom_scm.h>
     31#include <linux/slab.h>
     32#include <linux/spinlock.h>
     33
     34#include "arm-smmu.h"
     35
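        /*
         * Interrupt-select register in the optional global ("local_base")
         * register space; probe writes all-ones here so that context-bank
         * interrupts are selected for the non-secure side.
         */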
     36#define SMMU_INTR_SEL_NS     0x2000
     37
     38enum qcom_iommu_clk {
     39	CLK_IFACE,
     40	CLK_BUS,
     41	CLK_TBU,
     42	CLK_NUM,
     43};
     44
     45struct qcom_iommu_ctx;
     46
     47struct qcom_iommu_dev {
     48	/* IOMMU core code handle */
     49	struct iommu_device	 iommu;
     50	struct device		*dev;
     51	struct clk_bulk_data clks[CLK_NUM];
     52	void __iomem		*local_base;
     53	u32			 sec_id;
     54	u8			 num_ctxs;
     55	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
     56};
     57
     58struct qcom_iommu_ctx {
     59	struct device		*dev;
     60	void __iomem		*base;
     61	bool			 secure_init;
     62	u8			 asid;      /* asid and ctx bank # are 1:1 */
     63	struct iommu_domain	*domain;
     64};
     65
     66struct qcom_iommu_domain {
     67	struct io_pgtable_ops	*pgtbl_ops;
     68	spinlock_t		 pgtbl_lock;
     69	struct mutex		 init_mutex; /* Protects iommu pointer */
     70	struct iommu_domain	 domain;
     71	struct qcom_iommu_dev	*iommu;
     72	struct iommu_fwspec	*fwspec;
     73};
     74
     75static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
     76{
     77	return container_of(dom, struct qcom_iommu_domain, domain);
     78}
     79
     80static const struct iommu_ops qcom_iommu_ops;
     81
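        /*
         * Resolve the qcom_iommu_dev behind a master device, or NULL if the
         * device's fwspec does not point at this driver.
         */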
     82static struct qcom_iommu_dev * to_iommu(struct device *dev)
     83{
     84	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     85
     86	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
     87		return NULL;
     88
     89	return dev_iommu_priv_get(dev);
     90}
     91
     92static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
     93{
     94	struct qcom_iommu_dev *qcom_iommu = d->iommu;
     95	if (!qcom_iommu)
     96		return NULL;
     97	return qcom_iommu->ctxs[asid - 1];
     98}
     99
    100static inline void
    101iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
    102{
    103	writel_relaxed(val, ctx->base + reg);
    104}
    105
    106static inline void
    107iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
    108{
    109	writeq_relaxed(val, ctx->base + reg);
    110}
    111
    112static inline u32
    113iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
    114{
    115	return readl_relaxed(ctx->base + reg);
    116}
    117
    118static inline u64
    119iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
    120{
    121	return readq_relaxed(ctx->base + reg);
    122}
    123
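        /*
         * Drain outstanding TLB maintenance: kick TLBSYNC and poll TLBSTATUS
         * on every context bank of the domain, warning if a bank times out.
         */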
    124static void qcom_iommu_tlb_sync(void *cookie)
    125{
    126	struct qcom_iommu_domain *qcom_domain = cookie;
    127	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    128	unsigned i;
    129
    130	for (i = 0; i < fwspec->num_ids; i++) {
    131		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
    132		unsigned int val, ret;
    133
    134		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
    135
    136		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
    137					 (val & 0x1) == 0, 0, 5000000);
    138		if (ret)
    139			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
    140	}
    141}
    142
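        /* Invalidate the TLB by ASID on every context bank, then synchronise. */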
    143static void qcom_iommu_tlb_inv_context(void *cookie)
    144{
    145	struct qcom_iommu_domain *qcom_domain = cookie;
    146	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    147	unsigned i;
    148
    149	for (i = 0; i < fwspec->num_ids; i++) {
    150		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
    151		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
    152	}
    153
    154	qcom_iommu_tlb_sync(cookie);
    155}
    156
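        /*
         * Invalidate [iova, iova + size) one granule at a time via TLBIVA
         * (TLBIVAL for leaf entries); the ASID travels in the low bits of the
         * written address.  Callers are expected to issue a TLB sync afterwards.
         */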
    157static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
    158					    size_t granule, bool leaf, void *cookie)
    159{
    160	struct qcom_iommu_domain *qcom_domain = cookie;
    161	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    162	unsigned i, reg;
    163
    164	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
    165
    166	for (i = 0; i < fwspec->num_ids; i++) {
    167		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
    168		size_t s = size;
    169
    170		iova = (iova >> 12) << 12;
    171		iova |= ctx->asid;
    172		do {
    173			iommu_writel(ctx, reg, iova);
    174			iova += granule;
    175		} while (s -= granule);
    176	}
    177}
    178
    179static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
    180				      size_t granule, void *cookie)
    181{
    182	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
    183	qcom_iommu_tlb_sync(cookie);
    184}
    185
    186static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
    187				    unsigned long iova, size_t granule,
    188				    void *cookie)
    189{
    190	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
    191}
    192
    193static const struct iommu_flush_ops qcom_flush_ops = {
    194	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
    195	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
    196	.tlb_add_page	= qcom_iommu_tlb_add_page,
    197};
    198
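        /*
         * Context-bank fault handler: faults stall because SCTLR_CFCFG is set,
         * so pass the fault to report_iommu_fault(), log FSR/FAR/FSYNR0 as
         * needed, then clear FSR and terminate the stalled transaction.
         */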
    199static irqreturn_t qcom_iommu_fault(int irq, void *dev)
    200{
    201	struct qcom_iommu_ctx *ctx = dev;
    202	u32 fsr, fsynr;
    203	u64 iova;
    204
    205	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
    206
    207	if (!(fsr & ARM_SMMU_FSR_FAULT))
    208		return IRQ_NONE;
    209
    210	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
    211	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
    212
    213	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
    214		dev_err_ratelimited(ctx->dev,
    215				    "Unhandled context fault: fsr=0x%x, "
    216				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
    217				    fsr, iova, fsynr, ctx->asid);
    218	}
    219
    220	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
    221	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
    222
    223	return IRQ_HANDLED;
    224}
    225
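        /*
         * One-time domain finalisation (under init_mutex): allocate the
         * ARM 32-bit LPAE stage-1 page table, run the SCM secure init for
         * each context bank if it has not been done yet, then program
         * TTBR0/TCR/MAIR and finally SCTLR to enable translation.
         */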
    226static int qcom_iommu_init_domain(struct iommu_domain *domain,
    227				  struct qcom_iommu_dev *qcom_iommu,
    228				  struct device *dev)
    229{
    230	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    231	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    232	struct io_pgtable_ops *pgtbl_ops;
    233	struct io_pgtable_cfg pgtbl_cfg;
    234	int i, ret = 0;
    235	u32 reg;
    236
    237	mutex_lock(&qcom_domain->init_mutex);
    238	if (qcom_domain->iommu)
    239		goto out_unlock;
    240
    241	pgtbl_cfg = (struct io_pgtable_cfg) {
    242		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
    243		.ias		= 32,
    244		.oas		= 40,
    245		.tlb		= &qcom_flush_ops,
    246		.iommu_dev	= qcom_iommu->dev,
    247	};
    248
    249	qcom_domain->iommu = qcom_iommu;
    250	qcom_domain->fwspec = fwspec;
    251
    252	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
    253	if (!pgtbl_ops) {
    254		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
    255		ret = -ENOMEM;
    256		goto out_clear_iommu;
    257	}
    258
    259	/* Update the domain's page sizes to reflect the page table format */
    260	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
    261	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
    262	domain->geometry.force_aperture = true;
    263
    264	for (i = 0; i < fwspec->num_ids; i++) {
    265		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
    266
    267		if (!ctx->secure_init) {
    268			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
    269			if (ret) {
    270				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
    271				goto out_clear_iommu;
    272			}
    273			ctx->secure_init = true;
    274		}
    275
    276		/* TTBRs */
    277		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
    278				pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
    279				FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
    280		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);
    281
    282		/* TCR */
    283		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
    284				arm_smmu_lpae_tcr2(&pgtbl_cfg));
    285		iommu_writel(ctx, ARM_SMMU_CB_TCR,
    286			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);
    287
    288		/* MAIRs (stage-1 only) */
    289		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
    290				pgtbl_cfg.arm_lpae_s1_cfg.mair);
    291		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
    292				pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
    293
    294		/* SCTLR */
    295		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
    296		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
    297		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
    298		      ARM_SMMU_SCTLR_CFCFG;
    299
    300		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
    301			reg |= ARM_SMMU_SCTLR_E;
    302
    303		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
    304
    305		ctx->domain = domain;
    306	}
    307
    308	mutex_unlock(&qcom_domain->init_mutex);
    309
    310	/* Publish page table ops for map/unmap */
    311	qcom_domain->pgtbl_ops = pgtbl_ops;
    312
    313	return 0;
    314
    315out_clear_iommu:
    316	qcom_domain->iommu = NULL;
    317out_unlock:
    318	mutex_unlock(&qcom_domain->init_mutex);
    319	return ret;
    320}
    321
    322static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
    323{
    324	struct qcom_iommu_domain *qcom_domain;
    325
    326	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
    327		return NULL;
    328	/*
    329	 * Allocate the domain and initialise some of its data structures.
    330	 * We can't really do anything meaningful until we've added a
    331	 * master.
    332	 */
    333	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
    334	if (!qcom_domain)
    335		return NULL;
    336
    337	mutex_init(&qcom_domain->init_mutex);
    338	spin_lock_init(&qcom_domain->pgtbl_lock);
    339
    340	return &qcom_domain->domain;
    341}
    342
    343static void qcom_iommu_domain_free(struct iommu_domain *domain)
    344{
    345	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    346
    347	if (qcom_domain->iommu) {
    348		/*
    349		 * NOTE: unmap can be called after client device is powered
    350		 * off, for example, with GPUs or anything involving dma-buf.
    351		 * So we cannot rely on the device_link.  Make sure the IOMMU
    352		 * is on to avoid unclocked accesses in the TLB inv path:
    353		 */
    354		pm_runtime_get_sync(qcom_domain->iommu->dev);
    355		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
    356		pm_runtime_put_sync(qcom_domain->iommu->dev);
    357	}
    358
    359	kfree(qcom_domain);
    360}
    361
    362static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
    363{
    364	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    365	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    366	int ret;
    367
    368	if (!qcom_iommu) {
    369		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
    370		return -ENXIO;
    371	}
    372
    373	/* Ensure that the domain is finalized */
    374	pm_runtime_get_sync(qcom_iommu->dev);
    375	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
    376	pm_runtime_put_sync(qcom_iommu->dev);
    377	if (ret < 0)
    378		return ret;
    379
    380	/*
    381	 * Sanity check the domain. We don't support domains across
    382	 * different IOMMUs.
    383	 */
    384	if (qcom_domain->iommu != qcom_iommu) {
    385		dev_err(dev, "cannot attach to IOMMU %s while already "
    386			"attached to domain on IOMMU %s\n",
    387			dev_name(qcom_domain->iommu->dev),
    388			dev_name(qcom_iommu->dev));
    389		return -EINVAL;
    390	}
    391
    392	return 0;
    393}
    394
    395static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
    396{
    397	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    398	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    399	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    400	unsigned i;
    401
    402	if (WARN_ON(!qcom_domain->iommu))
    403		return;
    404
    405	pm_runtime_get_sync(qcom_iommu->dev);
    406	for (i = 0; i < fwspec->num_ids; i++) {
    407		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
    408
    409		/* Disable the context bank: */
    410		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
    411
    412		ctx->domain = NULL;
    413	}
    414	pm_runtime_put_sync(qcom_iommu->dev);
    415}
    416
    417static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
    418			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
    419{
    420	int ret;
    421	unsigned long flags;
    422	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    423	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
    424
    425	if (!ops)
    426		return -ENODEV;
    427
    428	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    429	ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
    430	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
    431	return ret;
    432}
    433
    434static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
    435			       size_t size, struct iommu_iotlb_gather *gather)
    436{
    437	size_t ret;
    438	unsigned long flags;
    439	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    440	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
    441
    442	if (!ops)
    443		return 0;
    444
    445	/* NOTE: unmap can be called after client device is powered off,
    446	 * for example, with GPUs or anything involving dma-buf.  So we
    447	 * cannot rely on the device_link.  Make sure the IOMMU is on to
    448	 * avoid unclocked accesses in the TLB inv path:
    449	 */
    450	pm_runtime_get_sync(qcom_domain->iommu->dev);
    451	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    452	ret = ops->unmap(ops, iova, size, gather);
    453	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
    454	pm_runtime_put_sync(qcom_domain->iommu->dev);
    455
    456	return ret;
    457}
    458
    459static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
    460{
    461	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    462	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
    463						  struct io_pgtable, ops);
    464	if (!qcom_domain->pgtbl_ops)
    465		return;
    466
    467	pm_runtime_get_sync(qcom_domain->iommu->dev);
    468	qcom_iommu_tlb_sync(pgtable->cookie);
    469	pm_runtime_put_sync(qcom_domain->iommu->dev);
    470}
    471
    472static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
    473				  struct iommu_iotlb_gather *gather)
    474{
    475	qcom_iommu_flush_iotlb_all(domain);
    476}
    477
    478static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
    479					   dma_addr_t iova)
    480{
    481	phys_addr_t ret;
    482	unsigned long flags;
    483	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    484	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;
    485
    486	if (!ops)
    487		return 0;
    488
    489	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    490	ret = ops->iova_to_phys(ops, iova);
    491	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
    492
    493	return ret;
    494}
    495
    496static bool qcom_iommu_capable(enum iommu_cap cap)
    497{
    498	switch (cap) {
    499	case IOMMU_CAP_CACHE_COHERENCY:
    500		/*
    501		 * Return true here as the SMMU can always send out coherent
    502		 * requests.
    503		 */
    504		return true;
    505	case IOMMU_CAP_NOEXEC:
    506		return true;
    507	default:
    508		return false;
    509	}
    510}
    511
    512static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
    513{
    514	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    515	struct device_link *link;
    516
    517	if (!qcom_iommu)
    518		return ERR_PTR(-ENODEV);
    519
    520	/*
    521	 * Establish the link between iommu and master, so that the
    522	 * iommu gets runtime enabled/disabled as per the master's
    523	 * needs.
    524	 */
    525	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
    526	if (!link) {
    527		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
    528			dev_name(qcom_iommu->dev), dev_name(dev));
    529		return ERR_PTR(-ENODEV);
    530	}
    531
    532	return &qcom_iommu->iommu;
    533}
    534
    535static void qcom_iommu_release_device(struct device *dev)
    536{
    537	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    538
    539	if (!qcom_iommu)
    540		return;
    541
    542	iommu_fwspec_free(dev);
    543}
    544
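        /*
         * Translate an "iommus" phandle argument (a single ASID cell) into a
         * fwspec id and bind the master to this IOMMU instance.
         */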
    545static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
    546{
    547	struct qcom_iommu_dev *qcom_iommu;
    548	struct platform_device *iommu_pdev;
    549	unsigned asid = args->args[0];
    550
    551	if (args->args_count != 1) {
    552		dev_err(dev, "incorrect number of iommu params found for %s "
    553			"(found %d, expected 1)\n",
    554			args->np->full_name, args->args_count);
    555		return -EINVAL;
    556	}
    557
    558	iommu_pdev = of_find_device_by_node(args->np);
    559	if (WARN_ON(!iommu_pdev))
    560		return -EINVAL;
    561
    562	qcom_iommu = platform_get_drvdata(iommu_pdev);
    563
    564	/* make sure the asid specified in dt is valid, so we don't have
    565	 * to sanity check this elsewhere, since 'asid - 1' is used to
    566	 * index into qcom_iommu->ctxs:
    567	 */
    568	if (WARN_ON(asid < 1) ||
    569	    WARN_ON(asid > qcom_iommu->num_ctxs)) {
    570		put_device(&iommu_pdev->dev);
    571		return -EINVAL;
    572	}
    573
    574	if (!dev_iommu_priv_get(dev)) {
    575		dev_iommu_priv_set(dev, qcom_iommu);
    576	} else {
    577		/* make sure devices iommus dt node isn't referring to
    578		 * multiple different iommu devices.  Multiple context
    579		 * banks are ok, but multiple devices are not:
    580		 */
    581		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
    582			put_device(&iommu_pdev->dev);
    583			return -EINVAL;
    584		}
    585	}
    586
    587	return iommu_fwspec_add_ids(dev, &asid, 1);
    588}
    589
    590static const struct iommu_ops qcom_iommu_ops = {
    591	.capable	= qcom_iommu_capable,
    592	.domain_alloc	= qcom_iommu_domain_alloc,
    593	.probe_device	= qcom_iommu_probe_device,
    594	.release_device	= qcom_iommu_release_device,
    595	.device_group	= generic_device_group,
    596	.of_xlate	= qcom_iommu_of_xlate,
    597	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
    598	.default_domain_ops = &(const struct iommu_domain_ops) {
    599		.attach_dev	= qcom_iommu_attach_dev,
    600		.detach_dev	= qcom_iommu_detach_dev,
    601		.map		= qcom_iommu_map,
    602		.unmap		= qcom_iommu_unmap,
    603		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
    604		.iotlb_sync	= qcom_iommu_iotlb_sync,
    605		.iova_to_phys	= qcom_iommu_iova_to_phys,
    606		.free		= qcom_iommu_domain_free,
    607	}
    608};
    609
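        /*
         * Hand the secure world the pagetable memory it asks for: query the
         * required size via SCM, allocate it without a kernel mapping and pass
         * it to qcom_scm_iommu_secure_ptbl_init().  Done at most once per boot.
         */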
    610static int qcom_iommu_sec_ptbl_init(struct device *dev)
    611{
    612	size_t psize = 0;
    613	unsigned int spare = 0;
    614	void *cpu_addr;
    615	dma_addr_t paddr;
    616	unsigned long attrs;
    617	static bool allocated = false;
    618	int ret;
    619
    620	if (allocated)
    621		return 0;
    622
    623	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
    624	if (ret) {
    625		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
    626			ret);
    627		return ret;
    628	}
    629
    630	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);
    631
    632	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
    633
    634	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
    635	if (!cpu_addr) {
    636		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
    637			psize);
    638		return -ENOMEM;
    639	}
    640
    641	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
    642	if (ret) {
    643		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
    644		goto free_mem;
    645	}
    646
    647	allocated = true;
    648	return 0;
    649
    650free_mem:
    651	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
    652	return ret;
    653}
    654
    655static int get_asid(const struct device_node *np)
    656{
    657	u32 reg;
    658
    659	/* read the "reg" property directly to get the relative address
    660	 * of the context bank, and calculate the asid from that:
    661	 */
    662	if (of_property_read_u32_index(np, "reg", 0, &reg))
    663		return -ENODEV;
    664
    665	return reg / 0x1000;      /* context banks are 0x1000 apart */
    666}
    667
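        /*
         * Probe one context-bank child node: map its registers, clear any
         * stale faults, install the shared fault handler and register the
         * bank under the ASID derived from its "reg" offset.
         */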
    668static int qcom_iommu_ctx_probe(struct platform_device *pdev)
    669{
    670	struct qcom_iommu_ctx *ctx;
    671	struct device *dev = &pdev->dev;
    672	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
    673	struct resource *res;
    674	int ret, irq;
    675
    676	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
    677	if (!ctx)
    678		return -ENOMEM;
    679
    680	ctx->dev = dev;
    681	platform_set_drvdata(pdev, ctx);
    682
    683	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    684	ctx->base = devm_ioremap_resource(dev, res);
    685	if (IS_ERR(ctx->base))
    686		return PTR_ERR(ctx->base);
    687
    688	irq = platform_get_irq(pdev, 0);
    689	if (irq < 0)
    690		return -ENODEV;
    691
    692	/* clear IRQs before registering fault handler, just in case the
    693	 * boot-loader left us a surprise:
    694	 */
    695	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
    696
    697	ret = devm_request_irq(dev, irq,
    698			       qcom_iommu_fault,
    699			       IRQF_SHARED,
    700			       "qcom-iommu-fault",
    701			       ctx);
    702	if (ret) {
    703		dev_err(dev, "failed to request IRQ %u\n", irq);
    704		return ret;
    705	}
    706
    707	ret = get_asid(dev->of_node);
    708	if (ret < 0) {
    709		dev_err(dev, "missing reg property\n");
    710		return ret;
    711	}
    712
    713	ctx->asid = ret;
    714
    715	dev_dbg(dev, "found asid %u\n", ctx->asid);
    716
    717	qcom_iommu->ctxs[ctx->asid - 1] = ctx;
    718
    719	return 0;
    720}
    721
    722static int qcom_iommu_ctx_remove(struct platform_device *pdev)
    723{
    724	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
    725	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);
    726
    727	platform_set_drvdata(pdev, NULL);
    728
    729	qcom_iommu->ctxs[ctx->asid - 1] = NULL;
    730
    731	return 0;
    732}
    733
    734static const struct of_device_id ctx_of_match[] = {
    735	{ .compatible = "qcom,msm-iommu-v1-ns" },
    736	{ .compatible = "qcom,msm-iommu-v1-sec" },
    737	{ /* sentinel */ }
    738};
    739
    740static struct platform_driver qcom_iommu_ctx_driver = {
    741	.driver	= {
    742		.name		= "qcom-iommu-ctx",
    743		.of_match_table	= ctx_of_match,
    744	},
    745	.probe	= qcom_iommu_ctx_probe,
    746	.remove = qcom_iommu_ctx_remove,
    747};
    748
    749static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
    750{
    751	struct device_node *child;
    752
    753	for_each_child_of_node(qcom_iommu->dev->of_node, child)
    754		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
    755			return true;
    756
    757	return false;
    758}
    759
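        /*
         * Probe the top-level IOMMU node: size ctxs[] from the largest child
         * ASID, grab the clocks and the secure id, set up the secure pagetable
         * if any secure context bank exists, then register with the IOMMU core
         * and populate the child context-bank devices.
         */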
    760static int qcom_iommu_device_probe(struct platform_device *pdev)
    761{
    762	struct device_node *child;
    763	struct qcom_iommu_dev *qcom_iommu;
    764	struct device *dev = &pdev->dev;
    765	struct resource *res;
    766	struct clk *clk;
    767	int ret, max_asid = 0;
    768
    769	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
    770	 * many child ctx devices we have:
    771	 */
    772	for_each_child_of_node(dev->of_node, child)
    773		max_asid = max(max_asid, get_asid(child));
    774
    775	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
    776				  GFP_KERNEL);
    777	if (!qcom_iommu)
    778		return -ENOMEM;
    779	qcom_iommu->num_ctxs = max_asid;
    780	qcom_iommu->dev = dev;
    781
    782	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    783	if (res) {
    784		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
    785		if (IS_ERR(qcom_iommu->local_base))
    786			return PTR_ERR(qcom_iommu->local_base);
    787	}
    788
    789	clk = devm_clk_get(dev, "iface");
    790	if (IS_ERR(clk)) {
    791		dev_err(dev, "failed to get iface clock\n");
    792		return PTR_ERR(clk);
    793	}
    794	qcom_iommu->clks[CLK_IFACE].clk = clk;
    795
    796	clk = devm_clk_get(dev, "bus");
    797	if (IS_ERR(clk)) {
    798		dev_err(dev, "failed to get bus clock\n");
    799		return PTR_ERR(clk);
    800	}
    801	qcom_iommu->clks[CLK_BUS].clk = clk;
    802
    803	clk = devm_clk_get_optional(dev, "tbu");
    804	if (IS_ERR(clk)) {
    805		dev_err(dev, "failed to get tbu clock\n");
    806		return PTR_ERR(clk);
    807	}
    808	qcom_iommu->clks[CLK_TBU].clk = clk;
    809
    810	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
    811				 &qcom_iommu->sec_id)) {
    812		dev_err(dev, "missing qcom,iommu-secure-id property\n");
    813		return -ENODEV;
    814	}
    815
    816	if (qcom_iommu_has_secure_context(qcom_iommu)) {
    817		ret = qcom_iommu_sec_ptbl_init(dev);
    818		if (ret) {
    819			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
    820			return ret;
    821		}
    822	}
    823
    824	platform_set_drvdata(pdev, qcom_iommu);
    825
    826	pm_runtime_enable(dev);
    827
    828	/* register context bank devices, which are child nodes: */
    829	ret = devm_of_platform_populate(dev);
    830	if (ret) {
    831		dev_err(dev, "Failed to populate iommu contexts\n");
    832		goto err_pm_disable;
    833	}
    834
    835	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
    836				     dev_name(dev));
    837	if (ret) {
    838		dev_err(dev, "Failed to register iommu in sysfs\n");
    839		goto err_pm_disable;
    840	}
    841
    842	ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
    843	if (ret) {
    844		dev_err(dev, "Failed to register iommu\n");
    845		goto err_pm_disable;
    846	}
    847
    848	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
    849
    850	if (qcom_iommu->local_base) {
    851		pm_runtime_get_sync(dev);
    852		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
    853		pm_runtime_put_sync(dev);
    854	}
    855
    856	return 0;
    857
    858err_pm_disable:
    859	pm_runtime_disable(dev);
    860	return ret;
    861}
    862
    863static int qcom_iommu_device_remove(struct platform_device *pdev)
    864{
    865	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
    866
    867	bus_set_iommu(&platform_bus_type, NULL);
    868
    869	pm_runtime_force_suspend(&pdev->dev);
    870	platform_set_drvdata(pdev, NULL);
    871	iommu_device_sysfs_remove(&qcom_iommu->iommu);
    872	iommu_device_unregister(&qcom_iommu->iommu);
    873
    874	return 0;
    875}
    876
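        /* Runtime PM simply gates the iface/bus/tbu bulk clocks. */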
    877static int __maybe_unused qcom_iommu_resume(struct device *dev)
    878{
    879	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
    880
    881	return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
    882}
    883
    884static int __maybe_unused qcom_iommu_suspend(struct device *dev)
    885{
    886	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
    887
    888	clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
    889
    890	return 0;
    891}
    892
    893static const struct dev_pm_ops qcom_iommu_pm_ops = {
    894	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
    895	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
    896				pm_runtime_force_resume)
    897};
    898
    899static const struct of_device_id qcom_iommu_of_match[] = {
    900	{ .compatible = "qcom,msm-iommu-v1" },
    901	{ /* sentinel */ }
    902};
    903
    904static struct platform_driver qcom_iommu_driver = {
    905	.driver	= {
    906		.name		= "qcom-iommu",
    907		.of_match_table	= qcom_iommu_of_match,
    908		.pm		= &qcom_iommu_pm_ops,
    909	},
    910	.probe	= qcom_iommu_device_probe,
    911	.remove	= qcom_iommu_device_remove,
    912};
    913
    914static int __init qcom_iommu_init(void)
    915{
    916	int ret;
    917
    918	ret = platform_driver_register(&qcom_iommu_ctx_driver);
    919	if (ret)
    920		return ret;
    921
    922	ret = platform_driver_register(&qcom_iommu_driver);
    923	if (ret)
    924		platform_driver_unregister(&qcom_iommu_ctx_driver);
    925
    926	return ret;
    927}
    928device_initcall(qcom_iommu_init);
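
A minimal usage sketch: how a master device behind one of these context banks could
exercise the driver through the generic IOMMU API, assuming the iommu_map() signature
of this kernel generation (no gfp argument).  The 'master' pointer and the fixed IOVA
are hypothetical placeholders, and error handling is trimmed to the essentials.

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Sketch only: 'master' stands for any platform device whose DT node carries
 * an "iommus = <&iommu ASID>" reference to a qcom,msm-iommu-v1 instance.
 */
static int example_map_one_page(struct device *master, phys_addr_t pa)
{
	struct iommu_domain *domain;
	phys_addr_t got;
	int ret;

	/* Unmanaged domain on the platform bus; qcom_iommu_domain_alloc() runs
	 * here, the context banks are only touched on attach. */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	/* First attach finalises the domain: page tables are allocated and
	 * TTBR/TCR/MAIR/SCTLR are programmed for the device's ASIDs. */
	ret = iommu_attach_device(domain, master);
	if (ret)
		goto out_free;

	/* Map one 4K page at a fixed IOVA and read the translation back. */
	ret = iommu_map(domain, 0x10000000, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	got = iommu_iova_to_phys(domain, 0x10000000);
	dev_info(master, "iova 0x10000000 -> %pa\n", &got);

	iommu_unmap(domain, 0x10000000, SZ_4K);
out_detach:
	iommu_detach_device(domain, master);
out_free:
	iommu_domain_free(domain);
	return ret;
}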