cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtk_iommu_v1.c (21220B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one IOMMU HW only supports 4K page mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)
#define MT2701_LARB_NR_MAX			3

/*
 * The MTK m4u supports a 4GB IOVA address space and only 4K page
 * mappings, so the page table size is exactly 4M: one 32-bit
 * descriptor per 4K page.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

struct mtk_iommu_v1_suspend_reg {
	u32			standard_axi_mode;
	u32			dcm_dis;
	u32			ctrl_reg;
	u32			int_control0;
};

struct mtk_iommu_v1_data {
	void __iomem			*base;
	int				irq;
	struct device			*dev;
	struct clk			*bclk;
	phys_addr_t			protect_base; /* protect memory base */
	struct mtk_iommu_v1_domain	*m4u_dom;

	struct iommu_device		iommu;
	struct dma_iommu_mapping	*mapping;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];

	struct mtk_iommu_v1_suspend_reg	reg;
};

struct mtk_iommu_v1_domain {
	spinlock_t			pgtlock; /* lock for page table */
	struct iommu_domain		domain;
	u32				*pgt_va;
	dma_addr_t			pgt_pa;
	struct mtk_iommu_v1_data	*data;
};

static int mtk_iommu_v1_bind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_v1_unbind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_v1_domain, domain);
}

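/*
 * Master IDs in the "iommus" DT property are flat: each LARB's ports
 * start at a fixed offset, so a global ID decodes into a (larb, port)
 * pair by finding the largest offset not exceeding the ID. For example,
 * an ID equal to LARB2_PORT_OFFSET + 4 (and below LARB3_PORT_OFFSET)
 * decodes to larb 2, port 4.
 */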
static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush-all has completed */
}

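/*
 * Invalidate the TLB for the range [iova, iova + size). The range
 * invalidation completes asynchronously, so poll REG_MMU_CPE_DONE and
 * fall back to a full flush if it times out.
 */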
static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					 unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_v1_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
	struct mtk_iommu_v1_data *data = dev_id;
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 IOMMU HW cannot determine whether a fault was caused
	 * by a read or a write, so report every fault as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Clear the interrupt */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_v1_tlb_flush_all(data);

	return IRQ_HANDLED;
}

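/*
 * Enable or disable IOMMU translation for every master port that @dev
 * owns by updating the per-LARB enable bitmask; the mtk-smi driver
 * later programs this bitmask into the SMI LARB hardware.
 */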
static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
				struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

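/*
 * Allocate the single flat 4M page table (one u32 descriptor per 4K
 * page of the 4GB IOVA space) and program its physical address into
 * the M4U.
 */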
static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
{
	struct mtk_iommu_v1_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_v1_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(dom);
}

static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain that was created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_v1_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_v1_config(data, dev, true);
	return 0;
}

static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_v1_config(data, dev, false);
}

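/*
 * Map @size bytes at @iova to @paddr by filling consecutive page table
 * descriptors: the 4K-aligned physical address ORed with the VALID and
 * NONSEC bits. If a target descriptor is already in use, roll back the
 * descriptors written so far and return -EEXIST.
 */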
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_v1_ops;

/*
 * The MTK generation one IOMMU HW only supports one IOMMU domain; all
 * clients share the same IOVA address space.
 */
static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* The MTK IOMMU supports a 4GB IOVA address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}

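/*
 * A client node references the M4U with one cell per master port, e.g.
 * (a sketch; the node label and port numbers are hypothetical):
 *
 *	iommus = <&iommu 3>, <&iommu 4>;
 *
 * which is what the args_count == 1 check above enforces.
 */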
static int mtk_iommu_v1_def_domain_type(struct device *dev)
{
	return IOMMU_DOMAIN_UNMANAGED;
}

static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct of_phandle_args iommu_spec;
	struct mtk_iommu_v1_data *data;
	int err, idx = 0, larbid, larbidx;
	struct device_link *link;
	struct device *larbdev;

	/*
	 * In the deferred probe case, free the existing fwspec.
	 * Always initialize the fwspec internally.
	 */
	if (fwspec) {
		iommu_fwspec_free(dev);
		fwspec = dev_iommu_fwspec_get(dev);
	}

	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {

		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		if (err)
			return ERR_PTR(err);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);
		idx++;
	}

	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		return ERR_PTR(-ENODEV); /* Not an IOMMU client device */

	data = dev_iommu_priv_get(dev);

	/* Link the consumer device with the smi-larb device (supplier) */
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	if (larbid >= MT2701_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (idx = 1; idx < fwspec->num_ids; idx++) {
		larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}

	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));

	return &data->iommu;
}

static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_v1_data *data;
	int err;

	data        = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_v1_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct device *larbdev;
	unsigned int larbid;

	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		return;

	data = dev_iommu_priv_get(dev);
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	larbdev = data->larb_imu[larbid].dev;
	device_link_remove(dev, larbdev);

	iommu_fwspec_free(dev);
}

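/*
 * One-time hardware setup: enable the bus clock, select the translation
 * fault protect behaviour, unmask all fault interrupts, program the
 * protect buffer address, enable dynamic clock management and install
 * the fault ISR.
 */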
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: HW writes here on a translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_v1_ops = {
	.domain_alloc	= mtk_iommu_v1_domain_alloc,
	.probe_device	= mtk_iommu_v1_probe_device,
	.probe_finalize = mtk_iommu_v1_probe_finalize,
	.release_device	= mtk_iommu_v1_release_device,
	.def_domain_type = mtk_iommu_v1_def_domain_type,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
	.owner          = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_v1_attach_device,
		.detach_dev	= mtk_iommu_v1_detach_device,
		.map		= mtk_iommu_v1_map,
		.unmap		= mtk_iommu_v1_unmap,
		.iova_to_phys	= mtk_iommu_v1_iova_to_phys,
		.free		= mtk_iommu_v1_domain_free,
	}
};

static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind		= mtk_iommu_v1_bind,
	.unbind		= mtk_iommu_v1_unbind,
};

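/*
 * Probe: map the MMIO region, reserve the protect buffer, resolve every
 * "mediatek,larbs" phandle (deferring until all LARB drivers have
 * bound), then register with the IOMMU core and the component
 * framework.
 */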
static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct mtk_iommu_v1_data	*data;
	struct resource			*res;
	struct component_match		*match = NULL;
	void				*protect;
	int				larb_nr, ret, i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory: HW accesses this region on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -ENODEV;
		}
		if (!plarbdev->dev.driver) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, component_release_of,
					    component_compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_v1_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	if (ret)
		goto out_sysfs_remove;

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
		if (ret)
			goto out_dev_unreg;
	}

	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
	if (ret)
		goto out_bus_set_null;
	return ret;

out_bus_set_null:
	bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int mtk_iommu_v1_remove(struct platform_device *pdev)
{
	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
	return 0;
}

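/*
 * Registers that the hardware loses across system suspend are saved
 * here and written back in resume, together with the page table base
 * and the protect buffer address.
 */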
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};

static struct platform_driver mtk_iommu_v1_driver = {
	.probe	= mtk_iommu_v1_probe,
	.remove	= mtk_iommu_v1_remove,
	.driver	= {
		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_v1_of_ids,
		.pm = &mtk_iommu_v1_pm_ops,
	}
};
module_platform_driver(mtk_iommu_v1_driver);

MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");