cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sun50i-iommu.c (27683B)


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define IOMMU_RESET_REG			0x010
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE			BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE		BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE		BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB		BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)		(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)

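/*
 * Editorial note on the sizing above (illustration, not from the
 * original driver): each second-level PT maps NUM_PT_ENTRIES * 4 KiB
 * = 1 MiB of IOVA space, so the NUM_DT_ENTRIES first-level entries
 * cover 4096 * 1 MiB, i.e. the full 32-bit, 4 GiB aperture advertised
 * in sun50i_iommu_domain_alloc() below.
 */
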
struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct iommu_group *group;
	struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), which consists
 * of 4096 4-byte Directory Table Entries (DTE), each pointing to a
 * Page Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}

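/*
 * Worked example (editorial illustration, not part of the original
 * driver): with the masks above, the IOVA 0x12345678 splits into DTE
 * index 0x123, PTE index 0x45 and page offset 0x678.
 */
static void __maybe_unused sun50i_iova_split_example(void)
{
	dma_addr_t iova = 0x12345678;

	WARN_ON(sun50i_iova_get_dte_index(iova) != 0x123);
	WARN_ON(sun50i_iova_get_pte_index(iova) != 0x45);
	WARN_ON(sun50i_iova_get_page_offset(iova) != 0x678);
}
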
/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}

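/*
 * Worked example (editorial illustration, not part of the original
 * driver): a PT at DMA address 0x45678400 encodes as the DTE
 * 0x45678401 (address bits 31:10 plus the valid attribute), and the
 * accessors above recover both pieces.
 */
static void __maybe_unused sun50i_dte_example(void)
{
	u32 dte = sun50i_mk_dte(0x45678400);

	WARN_ON(dte != 0x45678401);
	WARN_ON(!sun50i_dte_is_pt_valid(dte));
	WARN_ON(sun50i_dte_get_pt_address(dte) != 0x45678400);
}
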
/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in
 * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really useful for
 * enforcing any particular permission.
 *
 * Each page entry will then have a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will be using
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. And
 * then we will use the relevant domain index when generating the page
 * table entry depending on the permissions we want enforced.
 */

enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
	return page | flags | SUN50I_PTE_PAGE_VALID;
}

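/*
 * Worked example (editorial illustration, not part of the original
 * driver): mapping page 0x40001000 read/write yields a PTE with the
 * page address in bits 31:12, ACI_RD_WR in bits 7:4 and the valid
 * bit set.
 */
static void __maybe_unused sun50i_pte_example(void)
{
	u32 pte = sun50i_mk_pte(0x40001000, IOMMU_READ | IOMMU_WRITE);

	WARN_ON(pte != (0x40001000 |
			FIELD_PREP(SUN50I_PTE_ACI_MASK, SUN50I_IOMMU_ACI_RD_WR) |
			SUN50I_PTE_PAGE_VALID));
	WARN_ON(sun50i_get_pte_aci(pte) != SUN50I_IOMMU_ACI_RD_WR);
	WARN_ON(!sun50i_pte_is_page_valid(pte));
	WARN_ON(sun50i_pte_get_page_address(pte) != 0x40001000);
}
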
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	/*
	 * Install the new PT atomically: if a concurrent mapper beat
	 * us to this DTE, keep the PT it installed and free ours.
	 */
	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	/* sun50i_table_flush() takes a number of entries, not bytes */
	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
	return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}

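/*
 * Usage sketch (editorial illustration, not part of the original
 * driver): the map/unmap callbacks above are reached through the
 * generic IOMMU API, one 4 kB page at a time given the pgsize_bitmap
 * declared in sun50i_iommu_ops below. The IOVA and physical address
 * used here are arbitrary example values.
 */
static int __maybe_unused sun50i_iommu_map_example(struct iommu_domain *domain)
{
	/* Map one page read/write at IOVA 0x10000000, then undo it. */
	int ret = iommu_map(domain, 0x10000000, 0x40000000, SZ_4K,
			    IOMMU_READ | IOMMU_WRITE);

	if (!ret)
		iommu_unmap(domain, 0x10000000, SZ_4K);

	return ret;
}
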
static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
		sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
	struct sun50i_iommu_domain *sun50i_domain;

	if (type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY &&
	    type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}

static void sun50i_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain != domain)
		return;

	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
}

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		sun50i_iommu_detach_device(iommu->domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void sun50i_iommu_release_device(struct device *dev) {}

static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int sun50i_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}

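/*
 * Editorial illustration (based on the allwinner,sun50i-h6-iommu
 * devicetree binding, not part of this file): a master references the
 * IOMMU with a single cell, which of_xlate above reads as args[0].
 * The node names, unit addresses and clock/reset specifiers below are
 * illustrative:
 *
 *	iommu: iommu@30f0000 {
 *		compatible = "allwinner,sun50i-h6-iommu";
 *		reg = <0x030f0000 0x10000>;
 *		interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&ccu CLK_BUS_IOMMU>;
 *		resets = <&ccu RST_BUS_IOMMU>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	mixer0: mixer@1100000 {
 *		...
 *		iommus = <&iommu 0>;
 *	};
 */
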
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap	= SZ_4K,
	.device_group	= sun50i_iommu_device_group,
	.domain_alloc	= sun50i_iommu_domain_alloc,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.release_device	= sun50i_iommu_release_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sun50i_iommu_attach_device,
		.detach_dev	= sun50i_iommu_detach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync	= sun50i_iommu_iotlb_sync,
		.iova_to_phys	= sun50i_iommu_iova_to_phys,
		.map		= sun50i_iommu_map,
		.unmap		= sun50i_iommu_unmap,
		.free		= sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
		/*
		 * If we are in the read-only domain, then it means we
		 * tried to write.
		 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

		/*
		 * If we are in the write-only domain, then it means
		 * we tried to read.
		 */
	case SUN50I_IOMMU_ACI_WR:

		/*
		 * If we are in the domain without any permission, we
		 * can't really tell. Let's default to a read
		 * operation.
		 */
	case SUN50I_IOMMU_ACI_NONE:

		/* WTF? */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	struct sun50i_iommu *iommu = dev_id;
	u32 status;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	iommu_write(iommu, IOMMU_RESET_REG, ~status);
	iommu_write(iommu, IOMMU_RESET_REG, status);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}

static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		ret = PTR_ERR(iommu->group);
		goto err_free_cache;
	}

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_group;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_group;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_group;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_group;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_group;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
	iommu_group_put(iommu->group);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver		= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");