cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iommu.c (3498B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "vde.h"

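/*
 * Allocate an IOVA range of @size bytes and map @sgt into it with
 * read-write permissions. On success the allocated range is returned
 * via @iovap.
 */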
int tegra_vde_iommu_map(struct tegra_vde *vde,
			struct sg_table *sgt,
			struct iova **iovap,
			size_t size)
{
	struct iova *iova;
	unsigned long shift;
	unsigned long end;
	dma_addr_t addr;

	end = vde->domain->geometry.aperture_end;
	size = iova_align(&vde->iova, size);
	shift = iova_shift(&vde->iova);

	iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
	if (!iova)
		return -ENOMEM;

	addr = iova_dma_addr(&vde->iova, iova);

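	/* iommu_map_sgtable() returns the number of bytes mapped, 0 on failure */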
	size = iommu_map_sgtable(vde->domain, addr, sgt,
				 IOMMU_READ | IOMMU_WRITE);
	if (!size) {
		__free_iova(&vde->iova, iova);
		return -ENXIO;
	}

	*iovap = iova;

	return 0;
}

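/*
 * Tear down a mapping made by tegra_vde_iommu_map() and release its
 * IOVA range.
 */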
void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
{
	unsigned long shift = iova_shift(&vde->iova);
	unsigned long size = iova_size(iova) << shift;
	dma_addr_t addr = iova_dma_addr(&vde->iova, iova);

	iommu_unmap(vde->domain, addr, size);
	__free_iova(&vde->iova, iova);
}

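/*
 * Set up a private IOMMU domain and IOVA space for the VDE and reserve
 * the address ranges that the hardware cannot use safely.
 */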
int tegra_vde_iommu_init(struct tegra_vde *vde)
{
	struct device *dev = vde->dev;
	struct iova *iova;
	unsigned long order;
	unsigned long shift;
	int err;

	vde->group = iommu_group_get(dev);
	if (!vde->group)
		return 0;

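	/*
	 * The ARM DMA-API may have attached the device to its own IOMMU
	 * mapping at probe time; release it before attaching the device
	 * to our private domain.
	 */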
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif
	vde->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vde->domain) {
		err = -ENOMEM;
		goto put_group;
	}

	err = iova_cache_get();
	if (err)
		goto free_domain;

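	/* use the smallest page size supported by the IOMMU as the IOVA granule */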
	order = __ffs(vde->domain->pgsize_bitmap);
	init_iova_domain(&vde->iova, 1UL << order, 0);

	err = iommu_attach_group(vde->domain, vde->group);
	if (err)
		goto put_iova;

	/*
	 * We're using some static addresses that are not accessible by VDE
	 * to trap invalid memory accesses.
	 */
	shift = iova_shift(&vde->iova);
	iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
			    0x70000000 >> shift);
	if (!iova) {
		err = -ENOMEM;
		goto detach_group;
	}

	vde->iova_resv_static_addresses = iova;

	/*
	 * BSEV's end address wraps around due to an integer overflow during
	 * hardware context preparation if the IOVA is allocated at the end
	 * of the address space, and VDE can't handle that. Hence simply
	 * reserve the last page to avoid the problem.
	 */
	iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
			    (0xffffffff >> shift) + 1);
	if (!iova) {
		err = -ENOMEM;
		goto unreserve_iova;
	}

	vde->iova_resv_last_page = iova;

	return 0;

unreserve_iova:
	__free_iova(&vde->iova, vde->iova_resv_static_addresses);
detach_group:
	iommu_detach_group(vde->domain, vde->group);
put_iova:
	put_iova_domain(&vde->iova);
	iova_cache_put();
free_domain:
	iommu_domain_free(vde->domain);
put_group:
	iommu_group_put(vde->group);

	return err;
}

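/* Undo tegra_vde_iommu_init(); a no-op if no IOMMU domain was set up. */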
void tegra_vde_iommu_deinit(struct tegra_vde *vde)
{
	if (vde->domain) {
		__free_iova(&vde->iova, vde->iova_resv_last_page);
		__free_iova(&vde->iova, vde->iova_resv_static_addresses);
		iommu_detach_group(vde->domain, vde->group);
		put_iova_domain(&vde->iova);
		iova_cache_put();
		iommu_domain_free(vde->domain);
		iommu_group_put(vde->group);

		vde->domain = NULL;
	}
}