cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tegra.c (9064B)


/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

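/*
 * Power up the GPU: enable the vdd regulator (if present) and the core,
 * reference and power clocks, then, unless a PM domain already manages
 * the GPU, run the unclamp/reset sequence for the 3D power partition.
 */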
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	if (!tdev->pdev->dev.pm_domain) {
		reset_control_assert(tdev->rst);
		udelay(10);

		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
		if (ret)
			goto err_clamp;
		udelay(10);

		reset_control_deassert(tdev->rst);
		udelay(10);
	}

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}

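/*
 * Reverse of nvkm_device_tegra_power_up(): gate the pwr, ref and core
 * clocks, then disable the vdd regulator if one is present.
 */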
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}

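/*
 * Set up an explicitly managed IOMMU domain for the GPU, first detaching
 * any DMA-API mapping so nouveau can control the address space itself.
 * On failure the domain is left NULL and the driver carries on without
 * an IOMMU.
 */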
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (device_iommu_mapped(dev)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for the case where both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

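/*
 * Tear down the state created by nvkm_device_tegra_probe_iommu().
 */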
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

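/*
 * Recover the Tegra-specific wrapper from an embedded nvkm_device.
 */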
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

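/*
 * Interrupt handler: keep interrupts masked while the MC subdev
 * dispatches them, then re-arm.
 */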
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

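/*
 * Release the "stall" interrupt; called on suspend and before destruction.
 */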
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

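/*
 * Look up the GPU's "stall" interrupt and install the handler above.
 */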
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

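/*
 * Destructor: power the GPU down and release the IOMMU domain, then
 * return the wrapper so the caller can free it.
 */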
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};

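/*
 * Platform-device constructor: acquire the regulator, reset line and
 * clocks, configure the DMA mask and IOMMU, power the GPU up, and hand
 * the embedded nvkm_device to the common constructor.
 */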
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	unsigned long rate;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	rate = clk_get_rate(tdev->clk);
	if (rate == 0) {
		ret = clk_set_rate(tdev->clk, ULONG_MAX);
		if (ret < 0)
			goto free;

		rate = clk_get_rate(tdev->clk);

		dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
	}

	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif