cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ib_umem.h (6138B)


/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct	work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
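
/*
 * Worked example (illustrative, not part of the original header): for a umem
 * with iova = 0x10800 and length = 0x2000, counting 4 KiB DMA blocks gives
 * ALIGN(0x12800, 0x1000) - ALIGN_DOWN(0x10800, 0x1000) = 0x13000 - 0x10000 =
 * 0x3000 bytes, so ib_umem_num_dma_blocks() returns 3 even though the length
 * alone spans only two pages, because the region starts and ends mid-page.
 */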

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current DMA block
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
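
/*
 * Usage sketch (illustrative, not from the original header): a driver
 * typically walks the umem once per HW page-table entry, e.g.:
 *
 *	struct ib_block_iter biter;
 *	unsigned int i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, page_size)
 *		mydrv_set_pte(pt, i++, rdma_block_iter_dma_address(&biter));
 *
 * where mydrv_set_pte() and pt are hypothetical driver-side helpers and
 * page_size would come from ib_umem_find_best_pgsz().
 */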

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
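
/*
 * Usage sketch (illustrative, not from the original header): a device whose
 * offset field covers address bits 6..11 (64-byte aligned offsets up to 4032,
 * i.e. the "111111000000" mask above) might call:
 *
 *	page_size = ib_umem_find_best_pgoff(umem, mydrv_supported_pgsz,
 *					    GENMASK(11, 6));
 *	if (!page_size)
 *		return -EINVAL;
 *
 * where mydrv_supported_pgsz is a hypothetical bitmap of the device's
 * supported page sizes.
 */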

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
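
/*
 * Usage sketch (illustrative, not from the original header): a driver that
 * cannot cope with dma-buf moves would use the pinned variant, roughly:
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd, access);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	... program HW from &umem_dmabuf->umem,
 *	    e.g. via rdma_umem_for_each_dma_block() ...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * The unpinned ib_umem_dmabuf_get() path instead requires dma_buf_attach_ops
 * with a move_notify callback, and ib_umem_dmabuf_map_pages() must be called
 * with the dma-buf reservation lock held.
 */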

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */