cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

apple-dart.c (25987B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Apple DART (Device Address Resolution Table) IOMMU driver
      4 *
      5 * Copyright (C) 2021 The Asahi Linux Contributors
      6 *
      7 * Based on arm/arm-smmu/arm-ssmu.c and arm/arm-smmu-v3/arm-smmu-v3.c
      8 *  Copyright (C) 2013 ARM Limited
      9 *  Copyright (C) 2015 ARM Limited
     10 * and on exynos-iommu.c
     11 *  Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
     12 */
     13
     14#include <linux/atomic.h>
     15#include <linux/bitfield.h>
     16#include <linux/clk.h>
     17#include <linux/dev_printk.h>
     18#include <linux/dma-iommu.h>
     19#include <linux/dma-mapping.h>
     20#include <linux/err.h>
     21#include <linux/interrupt.h>
     22#include <linux/io-pgtable.h>
     23#include <linux/iommu.h>
     24#include <linux/iopoll.h>
     25#include <linux/module.h>
     26#include <linux/of.h>
     27#include <linux/of_address.h>
     28#include <linux/of_iommu.h>
     29#include <linux/of_platform.h>
     30#include <linux/pci.h>
     31#include <linux/platform_device.h>
     32#include <linux/slab.h>
     33#include <linux/swab.h>
     34#include <linux/types.h>
     35
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2

/* sidmap value with a bit set for each of the DART_MAX_STREAMS streams */
#define DART_STREAM_ALL 0xffff

/* hardware parameter registers, read once at probe time */
#define DART_PARAMS1 0x00
#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)

#define DART_PARAMS2 0x04
#define DART_PARAMS_BYPASS_SUPPORT BIT(0)

/* stream select + command pair, serialized by apple_dart::lock */
#define DART_STREAM_COMMAND 0x20
#define DART_STREAM_COMMAND_BUSY BIT(2)
#define DART_STREAM_COMMAND_INVALIDATE BIT(20)

#define DART_STREAM_SELECT 0x34

/* fault/error status register and its fields */
#define DART_ERROR 0x40
#define DART_ERROR_STREAM GENMASK(27, 24)
#define DART_ERROR_CODE GENMASK(11, 0)
#define DART_ERROR_FLAG BIT(31)

/* possible values of the DART_ERROR_CODE field */
#define DART_ERROR_READ_FAULT BIT(4)
#define DART_ERROR_WRITE_FAULT BIT(3)
#define DART_ERROR_NO_PTE BIT(2)
#define DART_ERROR_NO_PMD BIT(1)
#define DART_ERROR_NO_TTBR BIT(0)

#define DART_CONFIG 0x60
#define DART_CONFIG_LOCK BIT(15)

/* timeout (us) for the stream command busy bit to clear */
#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100

#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50

#define DART_STREAMS_ENABLE 0xfc

/* per-stream translation configuration register */
#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
#define DART_TCR_BYPASS1_ENABLE BIT(12)

/* per-stream translation table base registers (DART_MAX_TTBR slots each) */
#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
/*
 * Private structure associated with each DART device.
 *
 * @dev: device struct
 * @regs: mapped MMIO region
 * @irq: interrupt number, can be shared with other DARTs
 * @clks: clocks associated with this DART
 * @num_clks: number of @clks
 * @lock: lock for hardware operations involving this dart
 * @pgsize: pagesize supported by this DART
 * @supports_bypass: indicates if this DART supports bypass mode
 * @force_bypass: force bypass mode due to pagesize mismatch?
 * @sid2group: maps stream ids to iommu_groups
 * @iommu: iommu core device
 */
struct apple_dart {
	struct device *dev;

	void __iomem *regs;

	int irq;
	struct clk_bulk_data *clks;
	int num_clks;

	/* serializes the DART_STREAM_SELECT/DART_STREAM_COMMAND sequence */
	spinlock_t lock;

	u32 pgsize;
	u32 supports_bypass : 1;
	u32 force_bypass : 1;

	/* read and written only under apple_dart_groups_lock */
	struct iommu_group *sid2group[DART_MAX_STREAMS];
	struct iommu_device iommu;
};
    117
/*
 * Convenience struct to identify streams.
 *
 * The normal variant is used inside apple_dart_master_cfg which isn't written
 * to concurrently.
 * The atomic variant is used inside apple_dart_domain where we have to guard
 * against races from potential parallel calls to attach/detach_device.
 * Note that even inside the atomic variant the apple_dart pointer is not
 * protected: This pointer is initialized once under the domain init mutex
 * and never changed again afterwards. Devices with different dart pointers
 * cannot be attached to the same domain.
 *
 * @dart dart pointer
 * @sid stream id bitmap
 */
struct apple_dart_stream_map {
	struct apple_dart *dart;
	/* bit n set == stream n belongs to this map */
	unsigned long sidmap;
};
struct apple_dart_atomic_stream_map {
	struct apple_dart *dart;
	atomic64_t sidmap;
};
    141
/*
 * This structure is attached to each iommu domain handled by a DART.
 *
 * @pgtbl_ops: pagetable ops allocated by io-pgtable
 * @finalized: true if the domain has been completely initialized
 * @init_lock: protects domain initialization
 * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only)
 * @domain: core iommu domain pointer
 */
struct apple_dart_domain {
	/* NULL until apple_dart_finalize_domain() has run for this domain */
	struct io_pgtable_ops *pgtbl_ops;

	bool finalized;
	struct mutex init_lock;
	struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE];

	struct iommu_domain domain;
};
    160
/*
 * This structure is attached to devices with dev_iommu_priv_set() on of_xlate
 * and contains a list of streams bound to this device.
 * So far the worst case seen is a single device with two streams
 * from different darts, such that this simple static array is enough.
 *
 * @streams: streams for this device
 */
struct apple_dart_master_cfg {
	/* entries are filled front-to-back; a NULL dart terminates the list */
	struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
};
    172
/*
 * Helper macro to iterate over apple_dart_master_cfg.stream_maps and
 * apple_dart_domain.stream_maps
 *
 * Iteration stops at the first entry with a NULL dart pointer or after
 * MAX_DARTS_PER_DEVICE entries, whichever comes first.
 *
 * @i int used as loop variable
 * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain)
 * @stream_map pointer to the apple_dart_stream_map struct for each loop
 *             iteration
 */
#define for_each_stream_map(i, base, stream_map)                               \
	for (i = 0, stream_map = &(base)->stream_maps[0];                      \
	     i < MAX_DARTS_PER_DEVICE && stream_map->dart;                     \
	     stream_map = &(base)->stream_maps[++i])
    185
    186static struct platform_driver apple_dart_driver;
    187static const struct iommu_ops apple_dart_iommu_ops;
    188
/* Resolve a core iommu_domain to its enclosing apple_dart_domain. */
static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct apple_dart_domain, domain);
}
    193
    194static void
    195apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
    196{
    197	int sid;
    198
    199	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
    200		writel(DART_TCR_TRANSLATE_ENABLE,
    201		       stream_map->dart->regs + DART_TCR(sid));
    202}
    203
    204static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
    205{
    206	int sid;
    207
    208	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
    209		writel(0, stream_map->dart->regs + DART_TCR(sid));
    210}
    211
    212static void
    213apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
    214{
    215	int sid;
    216
    217	WARN_ON(!stream_map->dart->supports_bypass);
    218	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
    219		writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
    220		       stream_map->dart->regs + DART_TCR(sid));
    221}
    222
/*
 * Program TTBR slot @idx of every stream in @stream_map with @paddr.
 *
 * @paddr must be aligned to (1 << DART_TTBR_SHIFT) since the register only
 * holds the address bits above that shift together with the valid bit.
 */
static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
				   u8 idx, phys_addr_t paddr)
{
	int sid;

	WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
		writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
		       stream_map->dart->regs + DART_TTBR(sid, idx));
}
    233
    234static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
    235				     u8 idx)
    236{
    237	int sid;
    238
    239	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
    240		writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
    241}
    242
    243static void
    244apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
    245{
    246	int i;
    247
    248	for (i = 0; i < DART_MAX_TTBR; ++i)
    249		apple_dart_hw_clear_ttbr(stream_map, i);
    250}
    251
/*
 * Issue @command for all streams in @stream_map and wait for completion.
 *
 * The stream select and command registers form a pair, so the write
 * sequence and the busy-bit poll run under the per-DART spinlock to keep
 * concurrent commands from interleaving.
 *
 * Returns 0 on success or the readl_poll_timeout_atomic() error code if
 * the busy bit did not clear within DART_STREAM_COMMAND_BUSY_TIMEOUT us.
 */
static int
apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
			     u32 command)
{
	unsigned long flags;
	int ret;
	u32 command_reg;

	spin_lock_irqsave(&stream_map->dart->lock, flags);

	writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
	writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);

	ret = readl_poll_timeout_atomic(
		stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
		!(command_reg & DART_STREAM_COMMAND_BUSY), 1,
		DART_STREAM_COMMAND_BUSY_TIMEOUT);

	spin_unlock_irqrestore(&stream_map->dart->lock, flags);

	if (ret) {
		dev_err(stream_map->dart->dev,
			"busy bit did not clear after command %x for streams %lx\n",
			command, stream_map->sidmap);
		return ret;
	}

	return 0;
}
    281
/* Flush the TLB entries of all streams in @stream_map. */
static int
apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
{
	return apple_dart_hw_stream_command(stream_map,
					    DART_STREAM_COMMAND_INVALIDATE);
}
    288
/*
 * Bring the DART into a clean initial state.
 *
 * Fails with -EINVAL if the hardware configuration is locked (cannot be
 * changed until reboot). Otherwise disables DMA and clears the TTBRs for
 * all streams, enables all streams globally (per-stream isolation is done
 * via the TCRs instead), acknowledges any pending error and flushes the
 * TLB. Returns the result of the final TLB invalidation.
 */
static int apple_dart_hw_reset(struct apple_dart *dart)
{
	u32 config;
	struct apple_dart_stream_map stream_map;

	config = readl(dart->regs + DART_CONFIG);
	if (config & DART_CONFIG_LOCK) {
		dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
			config);
		return -EINVAL;
	}

	stream_map.dart = dart;
	stream_map.sidmap = DART_STREAM_ALL;
	apple_dart_hw_disable_dma(&stream_map);
	apple_dart_hw_clear_all_ttbrs(&stream_map);

	/* enable all streams globally since TCR is used to control isolation */
	writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);

	/* clear any pending errors before the interrupt is unmasked */
	writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);

	return apple_dart_hw_invalidate_tlb(&stream_map);
}
    314
/*
 * Invalidate the TLB entries of every stream attached to @domain.
 *
 * Takes an atomic snapshot of each sidmap into a plain stream_map so the
 * non-atomic hardware helper can be reused.
 */
static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
{
	int i;
	struct apple_dart_atomic_stream_map *domain_stream_map;
	struct apple_dart_stream_map stream_map;

	for_each_stream_map(i, domain, domain_stream_map) {
		stream_map.dart = domain_stream_map->dart;
		stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
		apple_dart_hw_invalidate_tlb(&stream_map);
	}
}
    327
/* iommu_domain_ops callback: full TLB flush for @domain. */
static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
    332
/*
 * iommu_domain_ops callback: flush after unmap. The DART has no
 * range-based invalidation here, so the whole TLB is flushed and the
 * gather argument is unused.
 */
static void apple_dart_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
    338
/*
 * iommu_domain_ops callback: flush after map; again a full TLB flush
 * regardless of @iova/@size.
 */
static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
    344
    345static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
    346					   dma_addr_t iova)
    347{
    348	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
    349	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
    350
    351	if (!ops)
    352		return 0;
    353
    354	return ops->iova_to_phys(ops, iova);
    355}
    356
/*
 * iommu_domain_ops callback: map @pgcount pages of size @pgsize starting
 * at @iova to @paddr via io-pgtable. Returns -ENODEV if the domain has
 * not been finalized (no page table allocated yet).
 */
static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
				phys_addr_t paddr, size_t pgsize,
				size_t pgcount, int prot, gfp_t gfp,
				size_t *mapped)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,
			      mapped);
}
    371
    372static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
    373				     unsigned long iova, size_t pgsize,
    374				     size_t pgcount,
    375				     struct iommu_iotlb_gather *gather)
    376{
    377	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
    378	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
    379
    380	return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
    381}
    382
/*
 * Point the streams in @stream_map at @domain's page table and enable
 * translation.
 *
 * Programs the TTBRs produced by io-pgtable, clears any remaining TTBR
 * slots, enables translate mode in the TCRs and flushes the TLB so stale
 * entries cannot survive the switch.
 */
static void
apple_dart_setup_translation(struct apple_dart_domain *domain,
			     struct apple_dart_stream_map *stream_map)
{
	int i;
	struct io_pgtable_cfg *pgtbl_cfg =
		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;

	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
		apple_dart_hw_set_ttbr(stream_map, i,
				       pgtbl_cfg->apple_dart_cfg.ttbr[i]);
	for (; i < DART_MAX_TTBR; ++i)
		apple_dart_hw_clear_ttbr(stream_map, i);

	apple_dart_hw_enable_translation(stream_map);
	apple_dart_hw_invalidate_tlb(stream_map);
}
    400
/*
 * One-time initialization of a DMA/UNMANAGED domain, performed on first
 * attach and guarded by the domain's init_lock.
 *
 * Copies the master's stream maps into the domain, allocates the
 * APPLE_DART io-pgtable and fills in the domain geometry. Idempotent:
 * subsequent calls return 0 immediately once @finalized is set.
 *
 * Returns 0 on success or -ENOMEM if the page table allocation fails.
 */
static int apple_dart_finalize_domain(struct iommu_domain *domain,
				      struct apple_dart_master_cfg *cfg)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
	struct apple_dart *dart = cfg->stream_maps[0].dart;
	struct io_pgtable_cfg pgtbl_cfg;
	int ret = 0;
	int i;

	mutex_lock(&dart_domain->init_lock);

	if (dart_domain->finalized)
		goto done;

	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
		atomic64_set(&dart_domain->stream_maps[i].sidmap,
			     cfg->stream_maps[i].sidmap);
	}

	/* 32-bit IOVA space (matches the aperture below); oas=36 is
	 * presumably the DART's physical output width — TODO confirm */
	pgtbl_cfg = (struct io_pgtable_cfg){
		.pgsize_bitmap = dart->pgsize,
		.ias = 32,
		.oas = 36,
		.coherent_walk = 1,
		.iommu_dev = dart->dev,
	};

	dart_domain->pgtbl_ops =
		alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
	if (!dart_domain->pgtbl_ops) {
		ret = -ENOMEM;
		goto done;
	}

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = DMA_BIT_MASK(32);
	domain->geometry.force_aperture = true;

	dart_domain->finalized = true;

done:
	mutex_unlock(&dart_domain->init_lock);
	return ret;
}
    447
/*
 * Atomically add or remove the master's streams to/from the domain's
 * stream maps.
 *
 * The first pass verifies that every slot refers to the same DART in both
 * maps (devices behind different DARTs cannot share a domain slot); only
 * then does the second pass set or clear the sid bits.
 *
 * Returns 0 on success or -EINVAL on a dart mismatch.
 */
static int
apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
		       struct apple_dart_stream_map *master_maps,
		       bool add_streams)
{
	int i;

	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (domain_maps[i].dart != master_maps[i].dart)
			return -EINVAL;
	}

	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (!domain_maps[i].dart)
			break;
		if (add_streams)
			atomic64_or(master_maps[i].sidmap,
				    &domain_maps[i].sidmap);
		else
			atomic64_and(~master_maps[i].sidmap,
				     &domain_maps[i].sidmap);
	}

	return 0;
}
    473
/* Add @cfg's streams to @domain's stream maps. */
static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
					 struct apple_dart_master_cfg *cfg)
{
	return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
				      true);
}
    480
/* Remove @cfg's streams from @domain's stream maps. */
static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
					    struct apple_dart_master_cfg *cfg)
{
	return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
				      false);
}
    487
/*
 * iommu_domain_ops callback: attach @dev's streams to @domain.
 *
 * Rejects translated domains when the DART is forced into bypass
 * (pagesize mismatch) and identity domains when the hardware lacks
 * bypass support. For DMA/UNMANAGED domains the streams are recorded in
 * the domain and their TTBRs/TCRs programmed; BLOCKED and IDENTITY
 * domains only reprogram the TCRs.
 */
static int apple_dart_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret, i;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	if (cfg->stream_maps[0].dart->force_bypass &&
	    domain->type != IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;
	if (!cfg->stream_maps[0].dart->supports_bypass &&
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;

	ret = apple_dart_finalize_domain(domain, cfg);
	if (ret)
		return ret;

	switch (domain->type) {
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_UNMANAGED:
		ret = apple_dart_domain_add_streams(dart_domain, cfg);
		if (ret)
			return ret;

		for_each_stream_map(i, cfg, stream_map)
			apple_dart_setup_translation(dart_domain, stream_map);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_disable_dma(stream_map);
		break;
	case IOMMU_DOMAIN_IDENTITY:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_enable_bypass(stream_map);
		break;
	}

	return ret;
}
    529
/*
 * iommu_domain_ops callback: detach @dev's streams from @domain.
 *
 * Disables DMA on the streams and, for DMA/UNMANAGED domains, drops the
 * stream bits from the domain's stream maps.
 */
static void apple_dart_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	int i;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	for_each_stream_map(i, cfg, stream_map)
		apple_dart_hw_disable_dma(stream_map);

	if (domain->type == IOMMU_DOMAIN_DMA ||
	    domain->type == IOMMU_DOMAIN_UNMANAGED)
		apple_dart_domain_remove_streams(dart_domain, cfg);
}
    545
/*
 * iommu_ops callback: register @dev with the IOMMU core.
 *
 * Requires the master cfg set up earlier by of_xlate; adds a device link
 * to each DART so the DART is runtime-resumed with its master, and
 * returns the iommu core device of the first DART.
 */
static struct iommu_device *apple_dart_probe_device(struct device *dev)
{
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_stream_map *stream_map;
	int i;

	if (!cfg)
		return ERR_PTR(-ENODEV);

	for_each_stream_map(i, cfg, stream_map)
		device_link_add(
			dev, stream_map->dart->dev,
			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);

	return &cfg->stream_maps[0].dart->iommu;
}
    562
/*
 * iommu_ops callback: free the master cfg allocated in of_xlate and
 * clear the device's iommu private pointer.
 */
static void apple_dart_release_device(struct device *dev)
{
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);

	if (!cfg)
		return;

	dev_iommu_priv_set(dev, NULL);
	kfree(cfg);
}
    573
    574static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
    575{
    576	struct apple_dart_domain *dart_domain;
    577
    578	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
    579	    type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
    580		return NULL;
    581
    582	dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
    583	if (!dart_domain)
    584		return NULL;
    585
    586	mutex_init(&dart_domain->init_lock);
    587
    588	/* no need to allocate pgtbl_ops or do any other finalization steps */
    589	if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
    590		dart_domain->finalized = true;
    591
    592	return &dart_domain->domain;
    593}
    594
    595static void apple_dart_domain_free(struct iommu_domain *domain)
    596{
    597	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
    598
    599	if (dart_domain->pgtbl_ops)
    600		free_io_pgtable_ops(dart_domain->pgtbl_ops);
    601
    602	kfree(dart_domain);
    603}
    604
/*
 * iommu_ops callback: parse one "iommus" specifier (a single cell holding
 * the stream id) and record it in the device's master cfg, allocating the
 * cfg on first use.
 *
 * All DARTs referenced by one device must agree on bypass support, forced
 * bypass and page size; the stream is merged into an existing slot for
 * the same DART or placed in the first free slot. Returns -EINVAL when
 * more than MAX_DARTS_PER_DEVICE distinct DARTs are referenced.
 *
 * NOTE(review): of_find_device_by_node() takes a reference on the
 * platform device that is never dropped here — looks like a refcount
 * leak; confirm against other drivers' of_xlate handling.
 * NOTE(review): dart is NULL if this DART has not probed yet —
 * presumably probe ordering prevents that; verify.
 */
static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct apple_dart *dart = platform_get_drvdata(iommu_pdev);
	struct apple_dart *cfg_dart;
	int i, sid;

	if (args->args_count != 1)
		return -EINVAL;
	sid = args->args[0];

	if (!cfg)
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;
	dev_iommu_priv_set(dev, cfg);

	cfg_dart = cfg->stream_maps[0].dart;
	if (cfg_dart) {
		if (cfg_dart->supports_bypass != dart->supports_bypass)
			return -EINVAL;
		if (cfg_dart->force_bypass != dart->force_bypass)
			return -EINVAL;
		if (cfg_dart->pgsize != dart->pgsize)
			return -EINVAL;
	}

	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (cfg->stream_maps[i].dart == dart) {
			cfg->stream_maps[i].sidmap |= 1 << sid;
			return 0;
		}
	}
	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (!cfg->stream_maps[i].dart) {
			cfg->stream_maps[i].dart = dart;
			cfg->stream_maps[i].sidmap = 1 << sid;
			return 0;
		}
	}

	return -EINVAL;
}
    649
/* Protects the sid2group arrays of all DARTs. */
static DEFINE_MUTEX(apple_dart_groups_lock);

/*
 * iommu_group iommudata release callback: drop the sid2group entries that
 * pointed at the group being destroyed and free the group's cfg copy.
 */
static void apple_dart_release_group(void *iommu_data)
{
	int i, sid;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *group_master_cfg = iommu_data;

	mutex_lock(&apple_dart_groups_lock);

	for_each_stream_map(i, group_master_cfg, stream_map)
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
			stream_map->dart->sid2group[sid] = NULL;

	kfree(iommu_data);
	mutex_unlock(&apple_dart_groups_lock);
}
    667
/*
 * iommu_ops callback: find or create the iommu_group for @dev.
 *
 * Streams that share hardware state must share a group, so each DART
 * keeps a sid2group map. If all of @dev's streams already point at the
 * same group, a reference to it is returned; if they point at different
 * groups the topology is unsupported (-EINVAL). Otherwise a new group is
 * created, a copy of the master cfg is attached as iommudata (freed by
 * apple_dart_release_group) and all of the device's streams are recorded
 * as belonging to it.
 */
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
	int i, sid;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *group_master_cfg;
	struct iommu_group *group = NULL;
	struct iommu_group *res = ERR_PTR(-EINVAL);

	mutex_lock(&apple_dart_groups_lock);

	for_each_stream_map(i, cfg, stream_map) {
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
			struct iommu_group *stream_group =
				stream_map->dart->sid2group[sid];

			/* streams may not span two existing groups */
			if (group && group != stream_group) {
				res = ERR_PTR(-EINVAL);
				goto out;
			}

			group = stream_group;
		}
	}

	if (group) {
		res = iommu_group_ref_get(group);
		goto out;
	}

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
#endif
		group = generic_device_group(dev);

	res = ERR_PTR(-ENOMEM);
	if (!group)
		goto out;

	group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
	if (!group_master_cfg) {
		iommu_group_put(group);
		goto out;
	}

	iommu_group_set_iommudata(group, group_master_cfg,
		apple_dart_release_group);

	for_each_stream_map(i, cfg, stream_map)
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
			stream_map->dart->sid2group[sid] = group;

	res = group;

out:
	mutex_unlock(&apple_dart_groups_lock);
	return res;
}
    728
    729static int apple_dart_def_domain_type(struct device *dev)
    730{
    731	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
    732
    733	if (cfg->stream_maps[0].dart->force_bypass)
    734		return IOMMU_DOMAIN_IDENTITY;
    735	if (!cfg->stream_maps[0].dart->supports_bypass)
    736		return IOMMU_DOMAIN_DMA;
    737
    738	return 0;
    739}
    740
#ifndef CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
/* Keep things compiling when CONFIG_PCI_APPLE isn't selected */
#define CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR	0
#endif
/* page-aligned base of the MSI doorbell that must stay out of the IOVA space */
#define DOORBELL_ADDR	(CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR & PAGE_MASK)

/*
 * iommu_ops callback: report reserved IOVA regions for @dev.
 *
 * PCI devices get an MSI region covering the doorbell page so DMA
 * mappings never collide with MSI writes; the common DMA reserved
 * regions are added for everyone.
 */
static void apple_dart_get_resv_regions(struct device *dev,
					struct list_head *head)
{
	if (IS_ENABLED(CONFIG_PCIE_APPLE) && dev_is_pci(dev)) {
		struct iommu_resv_region *region;
		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

		region = iommu_alloc_resv_region(DOORBELL_ADDR,
						 PAGE_SIZE, prot,
						 IOMMU_RESV_MSI);
		if (!region)
			return;

		list_add_tail(&region->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}
    765
/* IOMMU core callbacks implemented by this driver. */
static const struct iommu_ops apple_dart_iommu_ops = {
	.domain_alloc = apple_dart_domain_alloc,
	.probe_device = apple_dart_probe_device,
	.release_device = apple_dart_release_device,
	.device_group = apple_dart_device_group,
	.of_xlate = apple_dart_of_xlate,
	.def_domain_type = apple_dart_def_domain_type,
	.get_resv_regions = apple_dart_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= apple_dart_attach_dev,
		.detach_dev	= apple_dart_detach_dev,
		.map_pages	= apple_dart_map_pages,
		.unmap_pages	= apple_dart_unmap_pages,
		.flush_iotlb_all = apple_dart_flush_iotlb_all,
		.iotlb_sync	= apple_dart_iotlb_sync,
		.iotlb_sync_map	= apple_dart_iotlb_sync_map,
		.iova_to_phys	= apple_dart_iova_to_phys,
		.free		= apple_dart_domain_free,
	}
};
    789
    790static irqreturn_t apple_dart_irq(int irq, void *dev)
    791{
    792	struct apple_dart *dart = dev;
    793	const char *fault_name = NULL;
    794	u32 error = readl(dart->regs + DART_ERROR);
    795	u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
    796	u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
    797	u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
    798	u64 addr = addr_lo | (((u64)addr_hi) << 32);
    799	u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
    800
    801	if (!(error & DART_ERROR_FLAG))
    802		return IRQ_NONE;
    803
    804	/* there should only be a single bit set but let's use == to be sure */
    805	if (error_code == DART_ERROR_READ_FAULT)
    806		fault_name = "READ FAULT";
    807	else if (error_code == DART_ERROR_WRITE_FAULT)
    808		fault_name = "WRITE FAULT";
    809	else if (error_code == DART_ERROR_NO_PTE)
    810		fault_name = "NO PTE FOR IOVA";
    811	else if (error_code == DART_ERROR_NO_PMD)
    812		fault_name = "NO PMD FOR IOVA";
    813	else if (error_code == DART_ERROR_NO_TTBR)
    814		fault_name = "NO TTBR FOR IOVA";
    815	else
    816		fault_name = "unknown";
    817
    818	dev_err_ratelimited(
    819		dart->dev,
    820		"translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
    821		error, stream_idx, error_code, fault_name, addr);
    822
    823	writel(error, dart->regs + DART_ERROR);
    824	return IRQ_HANDLED;
    825}
    826
/*
 * Install (or, with @ops == NULL, remove) the DART IOMMU ops on the
 * platform and PCI buses if no other IOMMU driver claimed them yet.
 *
 * On a PCI bus failure the platform bus registration is rolled back so
 * the two buses stay consistent.
 */
static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
{
	int ret;

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, ops);
		if (ret)
			return ret;
	}
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		ret = bus_set_iommu(&pci_bus_type, ops);
		if (ret) {
			bus_set_iommu(&platform_bus_type, NULL);
			return ret;
		}
	}
#endif
	return 0;
}
    847
    848static int apple_dart_probe(struct platform_device *pdev)
    849{
    850	int ret;
    851	u32 dart_params[2];
    852	struct resource *res;
    853	struct apple_dart *dart;
    854	struct device *dev = &pdev->dev;
    855
    856	dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL);
    857	if (!dart)
    858		return -ENOMEM;
    859
    860	dart->dev = dev;
    861	spin_lock_init(&dart->lock);
    862
    863	dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    864	if (IS_ERR(dart->regs))
    865		return PTR_ERR(dart->regs);
    866
    867	if (resource_size(res) < 0x4000) {
    868		dev_err(dev, "MMIO region too small (%pr)\n", res);
    869		return -EINVAL;
    870	}
    871
    872	dart->irq = platform_get_irq(pdev, 0);
    873	if (dart->irq < 0)
    874		return -ENODEV;
    875
    876	ret = devm_clk_bulk_get_all(dev, &dart->clks);
    877	if (ret < 0)
    878		return ret;
    879	dart->num_clks = ret;
    880
    881	ret = clk_bulk_prepare_enable(dart->num_clks, dart->clks);
    882	if (ret)
    883		return ret;
    884
    885	ret = apple_dart_hw_reset(dart);
    886	if (ret)
    887		goto err_clk_disable;
    888
    889	dart_params[0] = readl(dart->regs + DART_PARAMS1);
    890	dart_params[1] = readl(dart->regs + DART_PARAMS2);
    891	dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
    892	dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
    893	dart->force_bypass = dart->pgsize > PAGE_SIZE;
    894
    895	ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
    896			  "apple-dart fault handler", dart);
    897	if (ret)
    898		goto err_clk_disable;
    899
    900	platform_set_drvdata(pdev, dart);
    901
    902	ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
    903	if (ret)
    904		goto err_free_irq;
    905
    906	ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
    907				     dev_name(&pdev->dev));
    908	if (ret)
    909		goto err_remove_bus_ops;
    910
    911	ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
    912	if (ret)
    913		goto err_sysfs_remove;
    914
    915	dev_info(
    916		&pdev->dev,
    917		"DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
    918		dart->pgsize, dart->supports_bypass, dart->force_bypass);
    919	return 0;
    920
    921err_sysfs_remove:
    922	iommu_device_sysfs_remove(&dart->iommu);
    923err_remove_bus_ops:
    924	apple_dart_set_bus_ops(NULL);
    925err_free_irq:
    926	free_irq(dart->irq, dart);
    927err_clk_disable:
    928	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
    929
    930	return ret;
    931}
    932
/*
 * Tear down a DART: reset the hardware, release the IRQ, drop the bus
 * ops, unregister from the IOMMU core/sysfs and disable the clocks.
 */
static int apple_dart_remove(struct platform_device *pdev)
{
	struct apple_dart *dart = platform_get_drvdata(pdev);

	apple_dart_hw_reset(dart);
	free_irq(dart->irq, dart);
	apple_dart_set_bus_ops(NULL);

	iommu_device_unregister(&dart->iommu);
	iommu_device_sysfs_remove(&dart->iommu);

	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);

	return 0;
}
    948
/* Devicetree match table: Apple M1 (t8103) DARTs. */
static const struct of_device_id apple_dart_of_match[] = {
	{ .compatible = "apple,t8103-dart", .data = NULL },
	{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);

static struct platform_driver apple_dart_driver = {
	.driver	= {
		.name			= "apple-dart",
		.of_match_table		= apple_dart_of_match,
		/* no sysfs bind/unbind: devices depend on their DART */
		.suppress_bind_attrs    = true,
	},
	.probe	= apple_dart_probe,
	.remove	= apple_dart_remove,
};

module_platform_driver(apple_dart_driver);

MODULE_DESCRIPTION("IOMMU API for Apple's DART");
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_LICENSE("GPL v2");