cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

swiotlb.h (5779B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map; must be a power
 * of 2. What is the appropriate value? The complexity of
 * {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * Log of the size of each IO TLB slab. The number of slabs is
 * command-line controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
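
/*
 * For illustration, how these constants relate: each slab is
 * 1 << IO_TLB_SHIFT = 2048 bytes, so the default 64MB pool holds
 * IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT = 32768 slabs, i.e.
 * 32768 / IO_TLB_SEGSIZE = 256 maximal contiguous segments.
 */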

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
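
/*
 * A sketch of the intent here, not a formal contract: the optional
 * @remap callback gives the caller a chance to remap the freshly
 * allocated pool before it is used, for example to adjust its memory
 * encryption attributes; a nonzero return is treated as failure and
 * initialization retries with a smaller pool.
 */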

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
		unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);
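
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * bounce one buffer through the pool and release the slot once the
 * transfer completes. On failure swiotlb_tbl_map_single() returns
 * (phys_addr_t)DMA_MAPPING_ERROR.
 *
 *	phys_addr_t tlb;
 *
 *	tlb = swiotlb_tbl_map_single(dev, phys, size, size, 0,
 *				     DMA_TO_DEVICE, attrs);
 *	if (tlb == (phys_addr_t)DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *	... device DMA runs here ...
 *	swiotlb_tbl_unmap_single(dev, tlb, size, DMA_TO_DEVICE, attrs);
 */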

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
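
/*
 * Note (an illustrative summary): swiotlb_map() allocates a bounce
 * slot, copies the original buffer into it as the direction requires,
 * and returns the DMA address of the bounce copy; the two sync helpers
 * above copy between the original buffer and the bounce slot on demand.
 */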

#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The virtual address of the swiotlb memory pool. The pool may be
 *		remapped when memory encryption is in use, in which case this
 *		holds the virtual address used for bounce buffer operations.
 * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
 *		@end. For default swiotlb, this is command line adjustable via
 *		setup_io_tlb_npages.
 * @used:	The number of used IO TLB blocks.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 * @index:	The index to start searching in the next round.
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 * @debugfs:	The debugfs dentry for this pool.
 * @late_alloc:	%true if allocated using the page allocator
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:  %true if the pool is used for memory allocation
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	struct io_tlb_slot {
		phys_addr_t orig_addr;
		size_t alloc_size;
		unsigned int list;
	} *slots;
};
extern struct io_tlb_mem io_tlb_default_mem;
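
/*
 * For illustration, the lookup arithmetic implied by the fields above:
 * a bounce address resolves back to its slot (and from there to the
 * original buffer) as
 *
 *	index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
 *	orig  = mem->slots[index].orig_addr;
 */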

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}
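
/*
 * Typical use of these predicates (a sketch of the dma-direct unmap
 * path, for illustration): only addresses that fall inside the pool
 * are bounced back out.
 *
 *	if (is_swiotlb_buffer(dev, paddr))
 *		swiotlb_tbl_unmap_single(dev, paddr, size, dir, attrs);
 */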

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */
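
/*
 * Sketch (illustrative): with a restricted DMA pool, coherent page
 * allocations can be served from the pool rather than the page
 * allocator, roughly:
 *
 *	if (is_swiotlb_for_alloc(dev))
 *		page = swiotlb_alloc(dev, size);
 */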

extern phys_addr_t swiotlb_unencrypted_base;

#endif /* __LINUX_SWIOTLB_H */