cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_mm.c (4412B)


/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"
#include "i915_mm.h"

/* State shared with the apply_to_page_range() PTE callbacks below. */
struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;	/* next pfn to write; counts insertions for unwind */
	pgprot_t prot;

	struct sgt_iter sgt;	/* cursor into the scatterlist (remap_io_sg only) */
	resource_size_t iobase;	/* dma address offset, or -1 to map by page pfn */
};

/* An iobase of -1 selects pfn remapping; anything else uses dma addresses. */
#define use_dma(io) ((io) != -1)

/*
 * Resolve the pfn for the iterator's current position, either from the
 * stored dma address (offset by iobase) or from the backing struct page.
 */
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* advance to the next sg entry once the current one is exhausted */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

/* vma flags the caller must already have set before remapping */
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

#if IS_ENABLED(CONFIG_X86)
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: first page frame number of the source kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
#endif
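
/*
 * Hypothetical usage sketch (not part of this file): a driver fault
 * handler could hand a bar-backed io_mapping out to userspace via
 * remap_io_mapping(). The names my_fault and my_iomap are illustrative
 * only; the vma is assumed to already carry EXPECTED_FLAGS.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct vm_area_struct *vma = vmf->vma;
 *		int err;
 *
 *		err = remap_io_mapping(vma, vma->vm_start,
 *				       my_iomap.base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       &my_iomap);
 *		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 *	}
 */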

/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: start sg entry
 * @iobase: offset added to the stored dma addresses, or -1 to map by pfn
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* unwind any ptes inserted before the failure */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
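
/*
 * Hypothetical usage sketch (not part of this file): map an object's
 * backing scatterlist into a user vma. The names sgt, use_lmem and
 * lmem_iobase are illustrative only; pass iobase == -1 to map by page
 * pfn (system memory), or a bar offset to map by dma address.
 *
 *	err = remap_io_sg(vma, vma->vm_start,
 *			  vma->vm_end - vma->vm_start,
 *			  sgt->sgl,
 *			  use_lmem ? lmem_iobase : (resource_size_t)-1);
 */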