cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

omap-iommu.c (42442B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * omap iommu: tlb and pagetable primitives
      4 *
      5 * Copyright (C) 2008-2010 Nokia Corporation
      6 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
      7 *
      8 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
      9 *		Paul Mundt and Toshihiro Kobayashi
     10 */
     11
     12#include <linux/dma-mapping.h>
     13#include <linux/err.h>
     14#include <linux/slab.h>
     15#include <linux/interrupt.h>
     16#include <linux/ioport.h>
     17#include <linux/platform_device.h>
     18#include <linux/iommu.h>
     19#include <linux/omap-iommu.h>
     20#include <linux/mutex.h>
     21#include <linux/spinlock.h>
     22#include <linux/io.h>
     23#include <linux/pm_runtime.h>
     24#include <linux/of.h>
     25#include <linux/of_irq.h>
     26#include <linux/of_platform.h>
     27#include <linux/regmap.h>
     28#include <linux/mfd/syscon.h>
     29
     30#include <linux/platform_data/iommu-omap.h>
     31
     32#include "omap-iopgtable.h"
     33#include "omap-iommu.h"
     34
     35static const struct iommu_ops omap_iommu_ops;
     36
     37#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))
     38
     39/* bitmap of the page sizes currently supported */
     40#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
     41
     42#define MMU_LOCK_BASE_SHIFT	10
     43#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
     44#define MMU_LOCK_BASE(x)	\
      45	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
     46
     47#define MMU_LOCK_VICT_SHIFT	4
     48#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
     49#define MMU_LOCK_VICT(x)	\
      50	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
     51
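        /*
         * Added note (not in the original source): the MMU_LOCK fields above pack
         * two values into one register: "base" (bits 14:10) is the number of
         * locked-down TLB entries and "victim" (bits 8:4) selects the entry that
         * the next load will overwrite. For example, a raw MMU_LOCK value of
         * 0x1050 decodes to base = 4, vict = 5: entries 0-3 are preserved and
         * entry 5 is loaded next.
         */
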
     52static struct platform_driver omap_iommu_driver;
     53static struct kmem_cache *iopte_cachep;
     54
     55/**
     56 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
     57 * @dom:	generic iommu domain handle
     58 **/
     59static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
     60{
     61	return container_of(dom, struct omap_iommu_domain, domain);
     62}
     63
     64/**
     65 * omap_iommu_save_ctx - Save registers for pm off-mode support
     66 * @dev:	client device
     67 *
      68 * This should be treated as a deprecated API. It is preserved only
      69 * to maintain existing functionality for the OMAP3 ISP driver.
     70 **/
     71void omap_iommu_save_ctx(struct device *dev)
     72{
     73	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
     74	struct omap_iommu *obj;
     75	u32 *p;
     76	int i;
     77
     78	if (!arch_data)
     79		return;
     80
     81	while (arch_data->iommu_dev) {
     82		obj = arch_data->iommu_dev;
     83		p = obj->ctx;
     84		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
     85			p[i] = iommu_read_reg(obj, i * sizeof(u32));
     86			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
     87				p[i]);
     88		}
     89		arch_data++;
     90	}
     91}
     92EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
     93
     94/**
     95 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
     96 * @dev:	client device
     97 *
      98 * This should be treated as a deprecated API. It is preserved only
      99 * to maintain existing functionality for the OMAP3 ISP driver.
    100 **/
    101void omap_iommu_restore_ctx(struct device *dev)
    102{
    103	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
    104	struct omap_iommu *obj;
    105	u32 *p;
    106	int i;
    107
    108	if (!arch_data)
    109		return;
    110
    111	while (arch_data->iommu_dev) {
    112		obj = arch_data->iommu_dev;
    113		p = obj->ctx;
    114		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
    115			iommu_write_reg(obj, p[i], i * sizeof(u32));
    116			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
    117				p[i]);
    118		}
    119		arch_data++;
    120	}
    121}
    122EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
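        /*
         * Illustrative only (not part of the original file): the legacy OMAP3 ISP
         * driver is the intended caller of the two context helpers above, roughly:
         *
         *	omap_iommu_save_ctx(isp->dev);     // before the domain enters off-mode
         *	...
         *	omap_iommu_restore_ctx(isp->dev);  // after power is restored
         *
         * where "isp->dev" stands in for the client device attached to the IOMMU.
         */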
    123
    124static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
    125{
    126	u32 val, mask;
    127
    128	if (!obj->syscfg)
    129		return;
    130
    131	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
    132	val = enable ? mask : 0;
    133	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
    134}
    135
    136static void __iommu_set_twl(struct omap_iommu *obj, bool on)
    137{
    138	u32 l = iommu_read_reg(obj, MMU_CNTL);
    139
    140	if (on)
    141		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
    142	else
    143		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
    144
    145	l &= ~MMU_CNTL_MASK;
    146	if (on)
    147		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
    148	else
    149		l |= (MMU_CNTL_MMU_EN);
    150
    151	iommu_write_reg(obj, l, MMU_CNTL);
    152}
    153
    154static int omap2_iommu_enable(struct omap_iommu *obj)
    155{
    156	u32 l, pa;
    157
    158	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd,  SZ_16K))
    159		return -EINVAL;
    160
    161	pa = virt_to_phys(obj->iopgd);
    162	if (!IS_ALIGNED(pa, SZ_16K))
    163		return -EINVAL;
    164
    165	l = iommu_read_reg(obj, MMU_REVISION);
    166	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
    167		 (l >> 4) & 0xf, l & 0xf);
    168
    169	iommu_write_reg(obj, pa, MMU_TTB);
    170
    171	dra7_cfg_dspsys_mmu(obj, true);
    172
    173	if (obj->has_bus_err_back)
    174		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
    175
    176	__iommu_set_twl(obj, true);
    177
    178	return 0;
    179}
    180
    181static void omap2_iommu_disable(struct omap_iommu *obj)
    182{
    183	u32 l = iommu_read_reg(obj, MMU_CNTL);
    184
    185	l &= ~MMU_CNTL_MASK;
    186	iommu_write_reg(obj, l, MMU_CNTL);
    187	dra7_cfg_dspsys_mmu(obj, false);
    188
    189	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
    190}
    191
    192static int iommu_enable(struct omap_iommu *obj)
    193{
    194	int ret;
    195
    196	ret = pm_runtime_get_sync(obj->dev);
    197	if (ret < 0)
    198		pm_runtime_put_noidle(obj->dev);
    199
    200	return ret < 0 ? ret : 0;
    201}
    202
    203static void iommu_disable(struct omap_iommu *obj)
    204{
    205	pm_runtime_put_sync(obj->dev);
    206}
    207
    208/*
    209 *	TLB operations
    210 */
    211static u32 iotlb_cr_to_virt(struct cr_regs *cr)
    212{
    213	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
    214	u32 mask = get_cam_va_mask(cr->cam & page_size);
    215
    216	return cr->cam & mask;
    217}
    218
    219static u32 get_iopte_attr(struct iotlb_entry *e)
    220{
    221	u32 attr;
    222
    223	attr = e->mixed << 5;
    224	attr |= e->endian;
    225	attr |= e->elsz >> 3;
    226	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
    227			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
    228	return attr;
    229}
    230
    231static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
    232{
    233	u32 status, fault_addr;
    234
    235	status = iommu_read_reg(obj, MMU_IRQSTATUS);
    236	status &= MMU_IRQ_MASK;
    237	if (!status) {
    238		*da = 0;
    239		return 0;
    240	}
    241
    242	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
    243	*da = fault_addr;
    244
    245	iommu_write_reg(obj, status, MMU_IRQSTATUS);
    246
    247	return status;
    248}
    249
    250void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
    251{
    252	u32 val;
    253
    254	val = iommu_read_reg(obj, MMU_LOCK);
    255
    256	l->base = MMU_LOCK_BASE(val);
    257	l->vict = MMU_LOCK_VICT(val);
    258}
    259
    260void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
    261{
    262	u32 val;
    263
    264	val = (l->base << MMU_LOCK_BASE_SHIFT);
    265	val |= (l->vict << MMU_LOCK_VICT_SHIFT);
    266
    267	iommu_write_reg(obj, val, MMU_LOCK);
    268}
    269
    270static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
    271{
    272	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
    273	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
    274}
    275
    276static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
    277{
    278	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
    279	iommu_write_reg(obj, cr->ram, MMU_RAM);
    280
    281	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
    282	iommu_write_reg(obj, 1, MMU_LD_TLB);
    283}
    284
    285/* only used in iotlb iteration for-loop */
    286struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
    287{
    288	struct cr_regs cr;
    289	struct iotlb_lock l;
    290
    291	iotlb_lock_get(obj, &l);
    292	l.vict = n;
    293	iotlb_lock_set(obj, &l);
    294	iotlb_read_cr(obj, &cr);
    295
    296	return cr;
    297}
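        /*
         * Usage sketch (added for illustration): this helper is not called
         * directly; callers go through the for_each_iotlb_cr() iterator from
         * omap-iommu.h, as the TLB walks below do:
         *
         *	struct cr_regs cr;
         *	int i;
         *
         *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
         *		if (!iotlb_cr_valid(&cr))
         *			continue;
         *		// cr now holds the CAM/RAM pair of TLB entry i
         *	}
         */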
    298
    299#ifdef PREFETCH_IOTLB
    300static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
    301				      struct iotlb_entry *e)
    302{
    303	struct cr_regs *cr;
    304
    305	if (!e)
    306		return NULL;
    307
    308	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
    309		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
    310			e->da);
    311		return ERR_PTR(-EINVAL);
    312	}
    313
    314	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
    315	if (!cr)
    316		return ERR_PTR(-ENOMEM);
    317
    318	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
    319	cr->ram = e->pa | e->endian | e->elsz | e->mixed;
    320
    321	return cr;
    322}
    323
    324/**
    325 * load_iotlb_entry - Set an iommu tlb entry
    326 * @obj:	target iommu
    327 * @e:		an iommu tlb entry info
    328 **/
    329static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
    330{
    331	int err = 0;
    332	struct iotlb_lock l;
    333	struct cr_regs *cr;
    334
    335	if (!obj || !obj->nr_tlb_entries || !e)
    336		return -EINVAL;
    337
    338	pm_runtime_get_sync(obj->dev);
    339
    340	iotlb_lock_get(obj, &l);
    341	if (l.base == obj->nr_tlb_entries) {
    342		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
    343		err = -EBUSY;
    344		goto out;
    345	}
    346	if (!e->prsvd) {
    347		int i;
    348		struct cr_regs tmp;
    349
    350		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
    351			if (!iotlb_cr_valid(&tmp))
    352				break;
    353
    354		if (i == obj->nr_tlb_entries) {
    355			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
    356			err = -EBUSY;
    357			goto out;
    358		}
    359
    360		iotlb_lock_get(obj, &l);
    361	} else {
    362		l.vict = l.base;
    363		iotlb_lock_set(obj, &l);
    364	}
    365
    366	cr = iotlb_alloc_cr(obj, e);
    367	if (IS_ERR(cr)) {
    368		pm_runtime_put_sync(obj->dev);
    369		return PTR_ERR(cr);
    370	}
    371
    372	iotlb_load_cr(obj, cr);
    373	kfree(cr);
    374
    375	if (e->prsvd)
    376		l.base++;
    377	/* increment victim for next tlb load */
    378	if (++l.vict == obj->nr_tlb_entries)
    379		l.vict = l.base;
    380	iotlb_lock_set(obj, &l);
    381out:
    382	pm_runtime_put_sync(obj->dev);
    383	return err;
    384}
    385
    386#else /* !PREFETCH_IOTLB */
    387
    388static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
    389{
    390	return 0;
    391}
    392
    393#endif /* !PREFETCH_IOTLB */
    394
    395static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
    396{
    397	return load_iotlb_entry(obj, e);
    398}
    399
    400/**
    401 * flush_iotlb_page - Clear an iommu tlb entry
    402 * @obj:	target iommu
    403 * @da:		iommu device virtual address
    404 *
    405 * Clear an iommu tlb entry which includes 'da' address.
    406 **/
    407static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
    408{
    409	int i;
    410	struct cr_regs cr;
    411
    412	pm_runtime_get_sync(obj->dev);
    413
    414	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
    415		u32 start;
    416		size_t bytes;
    417
    418		if (!iotlb_cr_valid(&cr))
    419			continue;
    420
    421		start = iotlb_cr_to_virt(&cr);
    422		bytes = iopgsz_to_bytes(cr.cam & 3);
    423
    424		if ((start <= da) && (da < start + bytes)) {
    425			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
    426				__func__, start, da, bytes);
    427			iotlb_load_cr(obj, &cr);
    428			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
    429			break;
    430		}
    431	}
    432	pm_runtime_put_sync(obj->dev);
    433
    434	if (i == obj->nr_tlb_entries)
    435		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
    436}
    437
    438/**
    439 * flush_iotlb_all - Clear all iommu tlb entries
    440 * @obj:	target iommu
    441 **/
    442static void flush_iotlb_all(struct omap_iommu *obj)
    443{
    444	struct iotlb_lock l;
    445
    446	pm_runtime_get_sync(obj->dev);
    447
    448	l.base = 0;
    449	l.vict = 0;
    450	iotlb_lock_set(obj, &l);
    451
    452	iommu_write_reg(obj, 1, MMU_GFLUSH);
    453
    454	pm_runtime_put_sync(obj->dev);
    455}
    456
    457/*
    458 *	H/W pagetable operations
    459 */
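        /*
         * Layout note (added commentary, derived from the helpers below): the
         * pagetable is the classic two-level OMAP/ARM-style layout. The L1 table
         * (iopgd) holds either 1MB section / 16MB supersection entries or
         * pointers to L2 tables; the L2 tables (iopte) hold 4KB small and 64KB
         * large page entries. 16MB and 64KB mappings are written as 16 duplicated
         * consecutive entries, which is why the alloc/clear paths below loop over
         * 16 slots and flush 16 entries at a time.
         */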
    460static void flush_iopte_range(struct device *dev, dma_addr_t dma,
    461			      unsigned long offset, int num_entries)
    462{
    463	size_t size = num_entries * sizeof(u32);
    464
    465	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
    466}
    467
    468static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
    469{
    470	dma_addr_t pt_dma;
    471
     472	/* Note: freed ioptes must be clean and ready for re-use */
    473	if (iopte) {
    474		if (dma_valid) {
    475			pt_dma = virt_to_phys(iopte);
    476			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
    477					 DMA_TO_DEVICE);
    478		}
    479
    480		kmem_cache_free(iopte_cachep, iopte);
    481	}
    482}
    483
    484static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
    485			dma_addr_t *pt_dma, u32 da)
    486{
    487	u32 *iopte;
    488	unsigned long offset = iopgd_index(da) * sizeof(da);
    489
     490	/* an L2 table already exists */
    491	if (*iopgd)
    492		goto pte_ready;
    493
    494	/*
    495	 * do the allocation outside the page table lock
    496	 */
    497	spin_unlock(&obj->page_table_lock);
    498	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
    499	spin_lock(&obj->page_table_lock);
    500
    501	if (!*iopgd) {
    502		if (!iopte)
    503			return ERR_PTR(-ENOMEM);
    504
    505		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
    506					 DMA_TO_DEVICE);
    507		if (dma_mapping_error(obj->dev, *pt_dma)) {
    508			dev_err(obj->dev, "DMA map error for L2 table\n");
    509			iopte_free(obj, iopte, false);
    510			return ERR_PTR(-ENOMEM);
    511		}
    512
    513		/*
    514		 * we rely on dma address and the physical address to be
    515		 * the same for mapping the L2 table
    516		 */
    517		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
    518			dev_err(obj->dev, "DMA translation error for L2 table\n");
    519			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
    520					 DMA_TO_DEVICE);
    521			iopte_free(obj, iopte, false);
    522			return ERR_PTR(-ENOMEM);
    523		}
    524
    525		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
    526
    527		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
    528		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
    529	} else {
     530		/* We raced, free the redundant table */
    531		iopte_free(obj, iopte, false);
    532	}
    533
    534pte_ready:
    535	iopte = iopte_offset(iopgd, da);
    536	*pt_dma = iopgd_page_paddr(iopgd);
    537	dev_vdbg(obj->dev,
    538		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
    539		 __func__, da, iopgd, *iopgd, iopte, *iopte);
    540
    541	return iopte;
    542}
    543
    544static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
    545{
    546	u32 *iopgd = iopgd_offset(obj, da);
    547	unsigned long offset = iopgd_index(da) * sizeof(da);
    548
    549	if ((da | pa) & ~IOSECTION_MASK) {
     550		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
    551			__func__, da, pa, IOSECTION_SIZE);
    552		return -EINVAL;
    553	}
    554
    555	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
    556	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
    557	return 0;
    558}
    559
    560static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
    561{
    562	u32 *iopgd = iopgd_offset(obj, da);
    563	unsigned long offset = iopgd_index(da) * sizeof(da);
    564	int i;
    565
    566	if ((da | pa) & ~IOSUPER_MASK) {
     567		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
    568			__func__, da, pa, IOSUPER_SIZE);
    569		return -EINVAL;
    570	}
    571
    572	for (i = 0; i < 16; i++)
    573		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
    574	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
    575	return 0;
    576}
    577
    578static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
    579{
    580	u32 *iopgd = iopgd_offset(obj, da);
    581	dma_addr_t pt_dma;
    582	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
    583	unsigned long offset = iopte_index(da) * sizeof(da);
    584
    585	if (IS_ERR(iopte))
    586		return PTR_ERR(iopte);
    587
    588	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
    589	flush_iopte_range(obj->dev, pt_dma, offset, 1);
    590
    591	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
    592		 __func__, da, pa, iopte, *iopte);
    593
    594	return 0;
    595}
    596
    597static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
    598{
    599	u32 *iopgd = iopgd_offset(obj, da);
    600	dma_addr_t pt_dma;
    601	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
    602	unsigned long offset = iopte_index(da) * sizeof(da);
    603	int i;
    604
    605	if ((da | pa) & ~IOLARGE_MASK) {
     606		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
    607			__func__, da, pa, IOLARGE_SIZE);
    608		return -EINVAL;
    609	}
    610
    611	if (IS_ERR(iopte))
    612		return PTR_ERR(iopte);
    613
    614	for (i = 0; i < 16; i++)
    615		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
    616	flush_iopte_range(obj->dev, pt_dma, offset, 16);
    617	return 0;
    618}
    619
    620static int
    621iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
    622{
    623	int (*fn)(struct omap_iommu *, u32, u32, u32);
    624	u32 prot;
    625	int err;
    626
    627	if (!obj || !e)
    628		return -EINVAL;
    629
    630	switch (e->pgsz) {
    631	case MMU_CAM_PGSZ_16M:
    632		fn = iopgd_alloc_super;
    633		break;
    634	case MMU_CAM_PGSZ_1M:
    635		fn = iopgd_alloc_section;
    636		break;
    637	case MMU_CAM_PGSZ_64K:
    638		fn = iopte_alloc_large;
    639		break;
    640	case MMU_CAM_PGSZ_4K:
    641		fn = iopte_alloc_page;
    642		break;
    643	default:
    644		fn = NULL;
    645		break;
    646	}
    647
    648	if (WARN_ON(!fn))
    649		return -EINVAL;
    650
    651	prot = get_iopte_attr(e);
    652
    653	spin_lock(&obj->page_table_lock);
    654	err = fn(obj, e->da, e->pa, prot);
    655	spin_unlock(&obj->page_table_lock);
    656
    657	return err;
    658}
    659
    660/**
    661 * omap_iopgtable_store_entry - Make an iommu pte entry
    662 * @obj:	target iommu
    663 * @e:		an iommu tlb entry info
    664 **/
    665static int
    666omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
    667{
    668	int err;
    669
    670	flush_iotlb_page(obj, e->da);
    671	err = iopgtable_store_entry_core(obj, e);
    672	if (!err)
    673		prefetch_iotlb_entry(obj, e);
    674	return err;
    675}
    676
    677/**
    678 * iopgtable_lookup_entry - Lookup an iommu pte entry
    679 * @obj:	target iommu
    680 * @da:		iommu device virtual address
    681 * @ppgd:	iommu pgd entry pointer to be returned
    682 * @ppte:	iommu pte entry pointer to be returned
    683 **/
    684static void
    685iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
    686{
    687	u32 *iopgd, *iopte = NULL;
    688
    689	iopgd = iopgd_offset(obj, da);
    690	if (!*iopgd)
    691		goto out;
    692
    693	if (iopgd_is_table(*iopgd))
    694		iopte = iopte_offset(iopgd, da);
    695out:
    696	*ppgd = iopgd;
    697	*ppte = iopte;
    698}
    699
    700static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
    701{
    702	size_t bytes;
    703	u32 *iopgd = iopgd_offset(obj, da);
    704	int nent = 1;
    705	dma_addr_t pt_dma;
    706	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
    707	unsigned long pt_offset = iopte_index(da) * sizeof(da);
    708
    709	if (!*iopgd)
    710		return 0;
    711
    712	if (iopgd_is_table(*iopgd)) {
    713		int i;
    714		u32 *iopte = iopte_offset(iopgd, da);
    715
    716		bytes = IOPTE_SIZE;
    717		if (*iopte & IOPTE_LARGE) {
    718			nent *= 16;
    719			/* rewind to the 1st entry */
    720			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
    721		}
    722		bytes *= nent;
    723		memset(iopte, 0, nent * sizeof(*iopte));
    724		pt_dma = iopgd_page_paddr(iopgd);
    725		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
    726
    727		/*
    728		 * do table walk to check if this table is necessary or not
    729		 */
    730		iopte = iopte_offset(iopgd, 0);
    731		for (i = 0; i < PTRS_PER_IOPTE; i++)
    732			if (iopte[i])
    733				goto out;
    734
    735		iopte_free(obj, iopte, true);
    736		nent = 1; /* for the next L1 entry */
    737	} else {
    738		bytes = IOPGD_SIZE;
    739		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
    740			nent *= 16;
    741			/* rewind to the 1st entry */
    742			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
    743		}
    744		bytes *= nent;
    745	}
    746	memset(iopgd, 0, nent * sizeof(*iopgd));
    747	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
    748out:
    749	return bytes;
    750}
    751
    752/**
    753 * iopgtable_clear_entry - Remove an iommu pte entry
    754 * @obj:	target iommu
    755 * @da:		iommu device virtual address
    756 **/
    757static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
    758{
    759	size_t bytes;
    760
    761	spin_lock(&obj->page_table_lock);
    762
    763	bytes = iopgtable_clear_entry_core(obj, da);
    764	flush_iotlb_page(obj, da);
    765
    766	spin_unlock(&obj->page_table_lock);
    767
    768	return bytes;
    769}
    770
    771static void iopgtable_clear_entry_all(struct omap_iommu *obj)
    772{
    773	unsigned long offset;
    774	int i;
    775
    776	spin_lock(&obj->page_table_lock);
    777
    778	for (i = 0; i < PTRS_PER_IOPGD; i++) {
    779		u32 da;
    780		u32 *iopgd;
    781
    782		da = i << IOPGD_SHIFT;
    783		iopgd = iopgd_offset(obj, da);
    784		offset = iopgd_index(da) * sizeof(da);
    785
    786		if (!*iopgd)
    787			continue;
    788
    789		if (iopgd_is_table(*iopgd))
    790			iopte_free(obj, iopte_offset(iopgd, 0), true);
    791
    792		*iopgd = 0;
    793		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
    794	}
    795
    796	flush_iotlb_all(obj);
    797
    798	spin_unlock(&obj->page_table_lock);
    799}
    800
    801/*
    802 *	Device IOMMU generic operations
    803 */
    804static irqreturn_t iommu_fault_handler(int irq, void *data)
    805{
    806	u32 da, errs;
    807	u32 *iopgd, *iopte;
    808	struct omap_iommu *obj = data;
    809	struct iommu_domain *domain = obj->domain;
    810	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
    811
    812	if (!omap_domain->dev)
    813		return IRQ_NONE;
    814
    815	errs = iommu_report_fault(obj, &da);
    816	if (errs == 0)
    817		return IRQ_HANDLED;
    818
    819	/* Fault callback or TLB/PTE Dynamic loading */
    820	if (!report_iommu_fault(domain, obj->dev, da, 0))
    821		return IRQ_HANDLED;
    822
    823	iommu_write_reg(obj, 0, MMU_IRQENABLE);
    824
    825	iopgd = iopgd_offset(obj, da);
    826
    827	if (!iopgd_is_table(*iopgd)) {
     828		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
    829			obj->name, errs, da, iopgd, *iopgd);
    830		return IRQ_NONE;
    831	}
    832
    833	iopte = iopte_offset(iopgd, da);
    834
    835	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
    836		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
    837
    838	return IRQ_NONE;
    839}
    840
    841/**
    842 * omap_iommu_attach() - attach iommu device to an iommu domain
    843 * @obj:	target omap iommu device
    844 * @iopgd:	page table
    845 **/
    846static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
    847{
    848	int err;
    849
    850	spin_lock(&obj->iommu_lock);
    851
    852	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
    853				     DMA_TO_DEVICE);
    854	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
    855		dev_err(obj->dev, "DMA map error for L1 table\n");
    856		err = -ENOMEM;
    857		goto out_err;
    858	}
    859
    860	obj->iopgd = iopgd;
    861	err = iommu_enable(obj);
    862	if (err)
    863		goto out_err;
    864	flush_iotlb_all(obj);
    865
    866	spin_unlock(&obj->iommu_lock);
    867
    868	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
    869
    870	return 0;
    871
    872out_err:
    873	spin_unlock(&obj->iommu_lock);
    874
    875	return err;
    876}
    877
    878/**
    879 * omap_iommu_detach - release iommu device
    880 * @obj:	target iommu
    881 **/
    882static void omap_iommu_detach(struct omap_iommu *obj)
    883{
    884	if (!obj || IS_ERR(obj))
    885		return;
    886
    887	spin_lock(&obj->iommu_lock);
    888
    889	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
    890			 DMA_TO_DEVICE);
    891	obj->pd_dma = 0;
    892	obj->iopgd = NULL;
    893	iommu_disable(obj);
    894
    895	spin_unlock(&obj->iommu_lock);
    896
    897	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
    898}
    899
    900static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
    901{
    902	struct iotlb_lock lock;
    903	struct cr_regs cr;
    904	struct cr_regs *tmp;
    905	int i;
    906
    907	/* check if there are any locked tlbs to save */
    908	iotlb_lock_get(obj, &lock);
    909	obj->num_cr_ctx = lock.base;
    910	if (!obj->num_cr_ctx)
    911		return;
    912
    913	tmp = obj->cr_ctx;
    914	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
     915		*tmp++ = cr;
    916}
    917
    918static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
    919{
    920	struct iotlb_lock l;
    921	struct cr_regs *tmp;
    922	int i;
    923
    924	/* no locked tlbs to restore */
    925	if (!obj->num_cr_ctx)
    926		return;
    927
    928	l.base = 0;
    929	tmp = obj->cr_ctx;
    930	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
    931		l.vict = i;
    932		iotlb_lock_set(obj, &l);
    933		iotlb_load_cr(obj, tmp);
    934	}
    935	l.base = obj->num_cr_ctx;
    936	l.vict = i;
    937	iotlb_lock_set(obj, &l);
    938}
    939
    940/**
    941 * omap_iommu_domain_deactivate - deactivate attached iommu devices
    942 * @domain: iommu domain attached to the target iommu device
    943 *
    944 * This API allows the client devices of IOMMU devices to suspend
     945 * the IOMMUs they control at runtime, after they have been idled
     946 * and all activity has been suspended. System Suspend will leverage
     947 * the PM driver late callbacks.
    948 **/
    949int omap_iommu_domain_deactivate(struct iommu_domain *domain)
    950{
    951	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
    952	struct omap_iommu_device *iommu;
    953	struct omap_iommu *oiommu;
    954	int i;
    955
    956	if (!omap_domain->dev)
    957		return 0;
    958
    959	iommu = omap_domain->iommus;
    960	iommu += (omap_domain->num_iommus - 1);
    961	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
    962		oiommu = iommu->iommu_dev;
    963		pm_runtime_put_sync(oiommu->dev);
    964	}
    965
    966	return 0;
    967}
    968EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
    969
    970/**
    971 * omap_iommu_domain_activate - activate attached iommu devices
    972 * @domain: iommu domain attached to the target iommu device
    973 *
    974 * This API allows the client devices of IOMMU devices to resume the
    975 * IOMMUs they control at runtime, before they can resume operations.
    976 * System Resume will leverage the PM driver late callbacks.
    977 **/
    978int omap_iommu_domain_activate(struct iommu_domain *domain)
    979{
    980	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
    981	struct omap_iommu_device *iommu;
    982	struct omap_iommu *oiommu;
    983	int i;
    984
    985	if (!omap_domain->dev)
    986		return 0;
    987
    988	iommu = omap_domain->iommus;
    989	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
    990		oiommu = iommu->iommu_dev;
    991		pm_runtime_get_sync(oiommu->dev);
    992	}
    993
    994	return 0;
    995}
    996EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
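        /*
         * Usage sketch (illustration only, not from the original source): a client
         * such as a remoteproc driver that owns the attached domain would bracket
         * its own suspend/resume paths with these helpers, e.g.
         *
         *	// after idling the remote processor
         *	omap_iommu_domain_deactivate(rproc_domain);
         *	...
         *	// before restarting it
         *	omap_iommu_domain_activate(rproc_domain);
         *
         * "rproc_domain" is a hypothetical iommu_domain pointer held by the client.
         */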
    997
    998/**
    999 * omap_iommu_runtime_suspend - disable an iommu device
   1000 * @dev:	iommu device
   1001 *
   1002 * This function performs all that is necessary to disable an
   1003 * IOMMU device, either during final detachment from a client
   1004 * device, or during system/runtime suspend of the device. This
   1005 * includes programming all the appropriate IOMMU registers, and
   1006 * managing the associated omap_hwmod's state and the device's
   1007 * reset line. This function also saves the context of any
   1008 * locked TLBs if suspending.
   1009 **/
   1010static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
   1011{
   1012	struct platform_device *pdev = to_platform_device(dev);
   1013	struct iommu_platform_data *pdata = dev_get_platdata(dev);
   1014	struct omap_iommu *obj = to_iommu(dev);
   1015	int ret;
   1016
   1017	/* save the TLBs only during suspend, and not for power down */
   1018	if (obj->domain && obj->iopgd)
   1019		omap_iommu_save_tlb_entries(obj);
   1020
   1021	omap2_iommu_disable(obj);
   1022
   1023	if (pdata && pdata->device_idle)
   1024		pdata->device_idle(pdev);
   1025
   1026	if (pdata && pdata->assert_reset)
   1027		pdata->assert_reset(pdev, pdata->reset_name);
   1028
   1029	if (pdata && pdata->set_pwrdm_constraint) {
   1030		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
   1031		if (ret) {
   1032			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
   1033				 ret);
   1034		}
   1035	}
   1036
   1037	return 0;
   1038}
   1039
   1040/**
   1041 * omap_iommu_runtime_resume - enable an iommu device
   1042 * @dev:	iommu device
   1043 *
   1044 * This function performs all that is necessary to enable an
   1045 * IOMMU device, either during initial attachment to a client
   1046 * device, or during system/runtime resume of the device. This
   1047 * includes programming all the appropriate IOMMU registers, and
   1048 * managing the associated omap_hwmod's state and the device's
   1049 * reset line. The function also restores any locked TLBs if
   1050 * resuming after a suspend.
   1051 **/
   1052static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
   1053{
   1054	struct platform_device *pdev = to_platform_device(dev);
   1055	struct iommu_platform_data *pdata = dev_get_platdata(dev);
   1056	struct omap_iommu *obj = to_iommu(dev);
   1057	int ret = 0;
   1058
   1059	if (pdata && pdata->set_pwrdm_constraint) {
   1060		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
   1061		if (ret) {
   1062			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
   1063				 ret);
   1064		}
   1065	}
   1066
   1067	if (pdata && pdata->deassert_reset) {
   1068		ret = pdata->deassert_reset(pdev, pdata->reset_name);
   1069		if (ret) {
   1070			dev_err(dev, "deassert_reset failed: %d\n", ret);
   1071			return ret;
   1072		}
   1073	}
   1074
   1075	if (pdata && pdata->device_enable)
   1076		pdata->device_enable(pdev);
   1077
   1078	/* restore the TLBs only during resume, and not for power up */
   1079	if (obj->domain)
   1080		omap_iommu_restore_tlb_entries(obj);
   1081
   1082	ret = omap2_iommu_enable(obj);
   1083
   1084	return ret;
   1085}
   1086
   1087/**
   1088 * omap_iommu_prepare - prepare() dev_pm_ops implementation
   1089 * @dev:	iommu device
   1090 *
   1091 * This function performs the necessary checks to determine if the IOMMU
   1092 * device needs suspending or not. The function checks if the runtime_pm
   1093 * status of the device is suspended, and returns 1 in that case. This
    1094 * causes the PM core to skip invoking any of the Sleep PM callbacks
   1095 * (suspend, suspend_late, resume, resume_early etc).
   1096 */
   1097static int omap_iommu_prepare(struct device *dev)
   1098{
   1099	if (pm_runtime_status_suspended(dev))
   1100		return 1;
   1101	return 0;
   1102}
   1103
   1104static bool omap_iommu_can_register(struct platform_device *pdev)
   1105{
   1106	struct device_node *np = pdev->dev.of_node;
   1107
   1108	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
   1109		return true;
   1110
   1111	/*
   1112	 * restrict IOMMU core registration only for processor-port MDMA MMUs
   1113	 * on DRA7 DSPs
   1114	 */
   1115	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
   1116	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
   1117		return true;
   1118
   1119	return false;
   1120}
   1121
   1122static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
   1123					      struct omap_iommu *obj)
   1124{
   1125	struct device_node *np = pdev->dev.of_node;
   1126	int ret;
   1127
   1128	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
   1129		return 0;
   1130
   1131	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
   1132		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
   1133		return -EINVAL;
   1134	}
   1135
   1136	obj->syscfg =
   1137		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
   1138	if (IS_ERR(obj->syscfg)) {
   1139		/* can fail with -EPROBE_DEFER */
   1140		ret = PTR_ERR(obj->syscfg);
   1141		return ret;
   1142	}
   1143
   1144	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
   1145				       &obj->id)) {
   1146		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
   1147		return -EINVAL;
   1148	}
   1149
   1150	if (obj->id != 0 && obj->id != 1) {
   1151		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
   1152		return -EINVAL;
   1153	}
   1154
   1155	return 0;
   1156}
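        /*
         * Hypothetical DT fragment (added for illustration; the ti,dra7-dsp-iommu
         * binding is authoritative). The property parsed above is a phandle to the
         * DSP_SYSTEM syscon followed by the MMU instance id (0 or 1):
         *
         *	mmu0_dsp1: mmu@40d01000 {
         *		compatible = "ti,dra7-dsp-iommu";
         *		ti,syscon-mmuconfig = <&dsp1_system 0x0>;
         *		...
         *	};
         *
         * "mmu0_dsp1" and "dsp1_system" are placeholder node labels.
         */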
   1157
   1158/*
   1159 *	OMAP Device MMU(IOMMU) detection
   1160 */
   1161static int omap_iommu_probe(struct platform_device *pdev)
   1162{
   1163	int err = -ENODEV;
   1164	int irq;
   1165	struct omap_iommu *obj;
   1166	struct resource *res;
   1167	struct device_node *of = pdev->dev.of_node;
   1168
   1169	if (!of) {
   1170		pr_err("%s: only DT-based devices are supported\n", __func__);
   1171		return -ENODEV;
   1172	}
   1173
   1174	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
   1175	if (!obj)
   1176		return -ENOMEM;
   1177
   1178	/*
   1179	 * self-manage the ordering dependencies between omap_device_enable/idle
   1180	 * and omap_device_assert/deassert_hardreset API
   1181	 */
   1182	if (pdev->dev.pm_domain) {
   1183		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
   1184		pdev->dev.pm_domain = NULL;
   1185	}
   1186
   1187	obj->name = dev_name(&pdev->dev);
   1188	obj->nr_tlb_entries = 32;
   1189	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
   1190	if (err && err != -EINVAL)
   1191		return err;
   1192	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
   1193		return -EINVAL;
   1194	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
   1195		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
   1196
   1197	obj->dev = &pdev->dev;
   1198	obj->ctx = (void *)obj + sizeof(*obj);
   1199	obj->cr_ctx = devm_kzalloc(&pdev->dev,
   1200				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
   1201				   GFP_KERNEL);
   1202	if (!obj->cr_ctx)
   1203		return -ENOMEM;
   1204
   1205	spin_lock_init(&obj->iommu_lock);
   1206	spin_lock_init(&obj->page_table_lock);
   1207
   1208	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1209	obj->regbase = devm_ioremap_resource(obj->dev, res);
   1210	if (IS_ERR(obj->regbase))
   1211		return PTR_ERR(obj->regbase);
   1212
   1213	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
   1214	if (err)
   1215		return err;
   1216
   1217	irq = platform_get_irq(pdev, 0);
   1218	if (irq < 0)
   1219		return -ENODEV;
   1220
   1221	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
   1222			       dev_name(obj->dev), obj);
   1223	if (err < 0)
   1224		return err;
   1225	platform_set_drvdata(pdev, obj);
   1226
   1227	if (omap_iommu_can_register(pdev)) {
   1228		obj->group = iommu_group_alloc();
   1229		if (IS_ERR(obj->group))
   1230			return PTR_ERR(obj->group);
   1231
   1232		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
   1233					     obj->name);
   1234		if (err)
   1235			goto out_group;
   1236
   1237		err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
   1238		if (err)
   1239			goto out_sysfs;
   1240	}
   1241
   1242	pm_runtime_enable(obj->dev);
   1243
   1244	omap_iommu_debugfs_add(obj);
   1245
   1246	dev_info(&pdev->dev, "%s registered\n", obj->name);
   1247
   1248	/* Re-probe bus to probe device attached to this IOMMU */
   1249	bus_iommu_probe(&platform_bus_type);
   1250
   1251	return 0;
   1252
   1253out_sysfs:
   1254	iommu_device_sysfs_remove(&obj->iommu);
   1255out_group:
   1256	iommu_group_put(obj->group);
   1257	return err;
   1258}
   1259
   1260static int omap_iommu_remove(struct platform_device *pdev)
   1261{
   1262	struct omap_iommu *obj = platform_get_drvdata(pdev);
   1263
   1264	if (obj->group) {
   1265		iommu_group_put(obj->group);
   1266		obj->group = NULL;
   1267
   1268		iommu_device_sysfs_remove(&obj->iommu);
   1269		iommu_device_unregister(&obj->iommu);
   1270	}
   1271
   1272	omap_iommu_debugfs_remove(obj);
   1273
   1274	pm_runtime_disable(obj->dev);
   1275
   1276	dev_info(&pdev->dev, "%s removed\n", obj->name);
   1277	return 0;
   1278}
   1279
   1280static const struct dev_pm_ops omap_iommu_pm_ops = {
   1281	.prepare = omap_iommu_prepare,
   1282	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
   1283				     pm_runtime_force_resume)
   1284	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
   1285			   omap_iommu_runtime_resume, NULL)
   1286};
   1287
   1288static const struct of_device_id omap_iommu_of_match[] = {
   1289	{ .compatible = "ti,omap2-iommu" },
   1290	{ .compatible = "ti,omap4-iommu" },
   1291	{ .compatible = "ti,dra7-iommu"	},
   1292	{ .compatible = "ti,dra7-dsp-iommu" },
   1293	{},
   1294};
   1295
   1296static struct platform_driver omap_iommu_driver = {
   1297	.probe	= omap_iommu_probe,
   1298	.remove	= omap_iommu_remove,
   1299	.driver	= {
   1300		.name	= "omap-iommu",
   1301		.pm	= &omap_iommu_pm_ops,
   1302		.of_match_table = of_match_ptr(omap_iommu_of_match),
   1303	},
   1304};
   1305
   1306static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
   1307{
   1308	memset(e, 0, sizeof(*e));
   1309
   1310	e->da		= da;
   1311	e->pa		= pa;
   1312	e->valid	= MMU_CAM_V;
   1313	e->pgsz		= pgsz;
   1314	e->endian	= MMU_RAM_ENDIAN_LITTLE;
   1315	e->elsz		= MMU_RAM_ELSZ_8;
   1316	e->mixed	= 0;
   1317
   1318	return iopgsz_to_bytes(e->pgsz);
   1319}
   1320
   1321static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
   1322			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
   1323{
   1324	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1325	struct device *dev = omap_domain->dev;
   1326	struct omap_iommu_device *iommu;
   1327	struct omap_iommu *oiommu;
   1328	struct iotlb_entry e;
   1329	int omap_pgsz;
   1330	u32 ret = -EINVAL;
   1331	int i;
   1332
   1333	omap_pgsz = bytes_to_iopgsz(bytes);
   1334	if (omap_pgsz < 0) {
   1335		dev_err(dev, "invalid size to map: %zu\n", bytes);
   1336		return -EINVAL;
   1337	}
   1338
   1339	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
   1340
   1341	iotlb_init_entry(&e, da, pa, omap_pgsz);
   1342
   1343	iommu = omap_domain->iommus;
   1344	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
   1345		oiommu = iommu->iommu_dev;
   1346		ret = omap_iopgtable_store_entry(oiommu, &e);
   1347		if (ret) {
   1348			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
   1349				ret);
   1350			break;
   1351		}
   1352	}
   1353
   1354	if (ret) {
   1355		while (i--) {
   1356			iommu--;
   1357			oiommu = iommu->iommu_dev;
   1358			iopgtable_clear_entry(oiommu, da);
   1359		}
   1360	}
   1361
   1362	return ret;
   1363}
   1364
   1365static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
   1366			       size_t size, struct iommu_iotlb_gather *gather)
   1367{
   1368	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1369	struct device *dev = omap_domain->dev;
   1370	struct omap_iommu_device *iommu;
   1371	struct omap_iommu *oiommu;
   1372	bool error = false;
   1373	size_t bytes = 0;
   1374	int i;
   1375
   1376	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
   1377
   1378	iommu = omap_domain->iommus;
   1379	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
   1380		oiommu = iommu->iommu_dev;
   1381		bytes = iopgtable_clear_entry(oiommu, da);
   1382		if (!bytes)
   1383			error = true;
   1384	}
   1385
   1386	/*
   1387	 * simplify return - we are only checking if any of the iommus
   1388	 * reported an error, but not if all of them are unmapping the
   1389	 * same number of entries. This should not occur due to the
   1390	 * mirror programming.
   1391	 */
   1392	return error ? 0 : bytes;
   1393}
   1394
   1395static int omap_iommu_count(struct device *dev)
   1396{
   1397	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
   1398	int count = 0;
   1399
   1400	while (arch_data->iommu_dev) {
   1401		count++;
   1402		arch_data++;
   1403	}
   1404
   1405	return count;
   1406}
   1407
   1408/* caller should call cleanup if this function fails */
   1409static int omap_iommu_attach_init(struct device *dev,
   1410				  struct omap_iommu_domain *odomain)
   1411{
   1412	struct omap_iommu_device *iommu;
   1413	int i;
   1414
   1415	odomain->num_iommus = omap_iommu_count(dev);
   1416	if (!odomain->num_iommus)
   1417		return -EINVAL;
   1418
   1419	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
   1420				  GFP_ATOMIC);
   1421	if (!odomain->iommus)
   1422		return -ENOMEM;
   1423
   1424	iommu = odomain->iommus;
   1425	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
   1426		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
   1427		if (!iommu->pgtable)
   1428			return -ENOMEM;
   1429
   1430		/*
   1431		 * should never fail, but please keep this around to ensure
   1432		 * we keep the hardware happy
   1433		 */
   1434		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
   1435					IOPGD_TABLE_SIZE)))
   1436			return -EINVAL;
   1437	}
   1438
   1439	return 0;
   1440}
   1441
   1442static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
   1443{
   1444	int i;
   1445	struct omap_iommu_device *iommu = odomain->iommus;
   1446
   1447	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
   1448		kfree(iommu->pgtable);
   1449
   1450	kfree(odomain->iommus);
   1451	odomain->num_iommus = 0;
   1452	odomain->iommus = NULL;
   1453}
   1454
   1455static int
   1456omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
   1457{
   1458	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
   1459	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1460	struct omap_iommu_device *iommu;
   1461	struct omap_iommu *oiommu;
   1462	int ret = 0;
   1463	int i;
   1464
   1465	if (!arch_data || !arch_data->iommu_dev) {
   1466		dev_err(dev, "device doesn't have an associated iommu\n");
   1467		return -EINVAL;
   1468	}
   1469
   1470	spin_lock(&omap_domain->lock);
   1471
   1472	/* only a single client device can be attached to a domain */
   1473	if (omap_domain->dev) {
   1474		dev_err(dev, "iommu domain is already attached\n");
   1475		ret = -EBUSY;
   1476		goto out;
   1477	}
   1478
   1479	ret = omap_iommu_attach_init(dev, omap_domain);
   1480	if (ret) {
   1481		dev_err(dev, "failed to allocate required iommu data %d\n",
   1482			ret);
   1483		goto init_fail;
   1484	}
   1485
   1486	iommu = omap_domain->iommus;
   1487	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
   1488		/* configure and enable the omap iommu */
   1489		oiommu = arch_data->iommu_dev;
   1490		ret = omap_iommu_attach(oiommu, iommu->pgtable);
   1491		if (ret) {
   1492			dev_err(dev, "can't get omap iommu: %d\n", ret);
   1493			goto attach_fail;
   1494		}
   1495
   1496		oiommu->domain = domain;
   1497		iommu->iommu_dev = oiommu;
   1498	}
   1499
   1500	omap_domain->dev = dev;
   1501
   1502	goto out;
   1503
   1504attach_fail:
   1505	while (i--) {
   1506		iommu--;
   1507		arch_data--;
   1508		oiommu = iommu->iommu_dev;
   1509		omap_iommu_detach(oiommu);
   1510		iommu->iommu_dev = NULL;
   1511		oiommu->domain = NULL;
   1512	}
   1513init_fail:
   1514	omap_iommu_detach_fini(omap_domain);
   1515out:
   1516	spin_unlock(&omap_domain->lock);
   1517	return ret;
   1518}
   1519
   1520static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
   1521				   struct device *dev)
   1522{
   1523	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
   1524	struct omap_iommu_device *iommu = omap_domain->iommus;
   1525	struct omap_iommu *oiommu;
   1526	int i;
   1527
   1528	if (!omap_domain->dev) {
   1529		dev_err(dev, "domain has no attached device\n");
   1530		return;
   1531	}
   1532
   1533	/* only a single device is supported per domain for now */
   1534	if (omap_domain->dev != dev) {
   1535		dev_err(dev, "invalid attached device\n");
   1536		return;
   1537	}
   1538
   1539	/*
   1540	 * cleanup in the reverse order of attachment - this addresses
   1541	 * any h/w dependencies between multiple instances, if any
   1542	 */
   1543	iommu += (omap_domain->num_iommus - 1);
   1544	arch_data += (omap_domain->num_iommus - 1);
   1545	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
   1546		oiommu = iommu->iommu_dev;
   1547		iopgtable_clear_entry_all(oiommu);
   1548
   1549		omap_iommu_detach(oiommu);
   1550		iommu->iommu_dev = NULL;
   1551		oiommu->domain = NULL;
   1552	}
   1553
   1554	omap_iommu_detach_fini(omap_domain);
   1555
   1556	omap_domain->dev = NULL;
   1557}
   1558
   1559static void omap_iommu_detach_dev(struct iommu_domain *domain,
   1560				  struct device *dev)
   1561{
   1562	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1563
   1564	spin_lock(&omap_domain->lock);
   1565	_omap_iommu_detach_dev(omap_domain, dev);
   1566	spin_unlock(&omap_domain->lock);
   1567}
   1568
   1569static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
   1570{
   1571	struct omap_iommu_domain *omap_domain;
   1572
   1573	if (type != IOMMU_DOMAIN_UNMANAGED)
   1574		return NULL;
   1575
   1576	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
   1577	if (!omap_domain)
   1578		return NULL;
   1579
   1580	spin_lock_init(&omap_domain->lock);
   1581
   1582	omap_domain->domain.geometry.aperture_start = 0;
   1583	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
   1584	omap_domain->domain.geometry.force_aperture = true;
   1585
   1586	return &omap_domain->domain;
   1587}
   1588
   1589static void omap_iommu_domain_free(struct iommu_domain *domain)
   1590{
   1591	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1592
   1593	/*
    1594	 * Is an iommu device still attached?
    1595	 * (currently, only one device can be attached)
   1596	 */
   1597	if (omap_domain->dev)
   1598		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
   1599
   1600	kfree(omap_domain);
   1601}
   1602
   1603static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
   1604					   dma_addr_t da)
   1605{
   1606	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
   1607	struct omap_iommu_device *iommu = omap_domain->iommus;
   1608	struct omap_iommu *oiommu = iommu->iommu_dev;
   1609	struct device *dev = oiommu->dev;
   1610	u32 *pgd, *pte;
   1611	phys_addr_t ret = 0;
   1612
   1613	/*
   1614	 * all the iommus within the domain will have identical programming,
   1615	 * so perform the lookup using just the first iommu
   1616	 */
   1617	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
   1618
   1619	if (pte) {
   1620		if (iopte_is_small(*pte))
   1621			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
   1622		else if (iopte_is_large(*pte))
   1623			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
   1624		else
   1625			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
   1626				(unsigned long long)da);
   1627	} else {
   1628		if (iopgd_is_section(*pgd))
   1629			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
   1630		else if (iopgd_is_super(*pgd))
   1631			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
   1632		else
   1633			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
   1634				(unsigned long long)da);
   1635	}
   1636
   1637	return ret;
   1638}
   1639
   1640static struct iommu_device *omap_iommu_probe_device(struct device *dev)
   1641{
   1642	struct omap_iommu_arch_data *arch_data, *tmp;
   1643	struct platform_device *pdev;
   1644	struct omap_iommu *oiommu;
   1645	struct device_node *np;
   1646	int num_iommus, i;
   1647
   1648	/*
   1649	 * Allocate the per-device iommu structure for DT-based devices.
   1650	 *
   1651	 * TODO: Simplify this when removing non-DT support completely from the
   1652	 * IOMMU users.
   1653	 */
   1654	if (!dev->of_node)
   1655		return ERR_PTR(-ENODEV);
   1656
   1657	/*
   1658	 * retrieve the count of IOMMU nodes using phandle size as element size
   1659	 * since #iommu-cells = 0 for OMAP
   1660	 */
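        	/*
        	 * Illustrative client node (added, not from the original file): with
        	 * #iommu-cells = <0>, each "iommus" entry is a bare phandle, e.g.
        	 *
        	 *	dsp1: dsp@40800000 {
        	 *		...
        	 *		iommus = <&mmu0_dsp1>, <&mmu1_dsp1>;
        	 *	};
        	 *
        	 * which would give num_iommus = 2 for this device. Node names and
        	 * labels here are placeholders.
        	 */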
   1661	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
   1662						     sizeof(phandle));
   1663	if (num_iommus < 0)
   1664		return ERR_PTR(-ENODEV);
   1665
   1666	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
   1667	if (!arch_data)
   1668		return ERR_PTR(-ENOMEM);
   1669
   1670	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
   1671		np = of_parse_phandle(dev->of_node, "iommus", i);
   1672		if (!np) {
   1673			kfree(arch_data);
   1674			return ERR_PTR(-EINVAL);
   1675		}
   1676
   1677		pdev = of_find_device_by_node(np);
   1678		if (!pdev) {
   1679			of_node_put(np);
   1680			kfree(arch_data);
   1681			return ERR_PTR(-ENODEV);
   1682		}
   1683
   1684		oiommu = platform_get_drvdata(pdev);
   1685		if (!oiommu) {
   1686			of_node_put(np);
   1687			kfree(arch_data);
   1688			return ERR_PTR(-EINVAL);
   1689		}
   1690
   1691		tmp->iommu_dev = oiommu;
   1692		tmp->dev = &pdev->dev;
   1693
   1694		of_node_put(np);
   1695	}
   1696
   1697	dev_iommu_priv_set(dev, arch_data);
   1698
   1699	/*
   1700	 * use the first IOMMU alone for the sysfs device linking.
   1701	 * TODO: Evaluate if a single iommu_group needs to be
   1702	 * maintained for both IOMMUs
   1703	 */
   1704	oiommu = arch_data->iommu_dev;
   1705
   1706	return &oiommu->iommu;
   1707}
   1708
   1709static void omap_iommu_release_device(struct device *dev)
   1710{
   1711	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
   1712
   1713	if (!dev->of_node || !arch_data)
   1714		return;
   1715
   1716	dev_iommu_priv_set(dev, NULL);
   1717	kfree(arch_data);
   1718
   1719}
   1720
   1721static struct iommu_group *omap_iommu_device_group(struct device *dev)
   1722{
   1723	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
   1724	struct iommu_group *group = ERR_PTR(-EINVAL);
   1725
   1726	if (!arch_data)
   1727		return ERR_PTR(-ENODEV);
   1728
   1729	if (arch_data->iommu_dev)
   1730		group = iommu_group_ref_get(arch_data->iommu_dev->group);
   1731
   1732	return group;
   1733}
   1734
   1735static const struct iommu_ops omap_iommu_ops = {
   1736	.domain_alloc	= omap_iommu_domain_alloc,
   1737	.probe_device	= omap_iommu_probe_device,
   1738	.release_device	= omap_iommu_release_device,
   1739	.device_group	= omap_iommu_device_group,
   1740	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
   1741	.default_domain_ops = &(const struct iommu_domain_ops) {
   1742		.attach_dev	= omap_iommu_attach_dev,
   1743		.detach_dev	= omap_iommu_detach_dev,
   1744		.map		= omap_iommu_map,
   1745		.unmap		= omap_iommu_unmap,
   1746		.iova_to_phys	= omap_iommu_iova_to_phys,
   1747		.free		= omap_iommu_domain_free,
   1748	}
   1749};
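        /*
         * Consumer-side sketch (added for illustration; not part of this file).
         * With these ops registered, a client uses the generic IOMMU API in the
         * usual UNMANAGED-domain fashion, roughly:
         *
         *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
         *
         *	ret = iommu_attach_device(domain, client_dev);
         *	ret = iommu_map(domain, da, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE);
         *	...
         *	iommu_unmap(domain, da, SZ_4K);
         *	iommu_detach_device(domain, client_dev);
         *	iommu_domain_free(domain);
         *
         * "client_dev", "da" and "pa" are placeholders for the client device and
         * its device/physical addresses.
         */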
   1750
   1751static int __init omap_iommu_init(void)
   1752{
   1753	struct kmem_cache *p;
   1754	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
    1755	size_t align = 1 << 10; /* L2 pagetable alignment */
   1756	struct device_node *np;
   1757	int ret;
   1758
   1759	np = of_find_matching_node(NULL, omap_iommu_of_match);
   1760	if (!np)
   1761		return 0;
   1762
   1763	of_node_put(np);
   1764
   1765	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
   1766			      NULL);
   1767	if (!p)
   1768		return -ENOMEM;
   1769	iopte_cachep = p;
   1770
   1771	omap_iommu_debugfs_init();
   1772
   1773	ret = platform_driver_register(&omap_iommu_driver);
   1774	if (ret) {
   1775		pr_err("%s: failed to register driver\n", __func__);
   1776		goto fail_driver;
   1777	}
   1778
   1779	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
   1780	if (ret)
   1781		goto fail_bus;
   1782
   1783	return 0;
   1784
   1785fail_bus:
   1786	platform_driver_unregister(&omap_iommu_driver);
   1787fail_driver:
   1788	kmem_cache_destroy(iopte_cachep);
   1789	return ret;
   1790}
   1791subsys_initcall(omap_iommu_init);
   1792/* must be ready before omap3isp is probed */