cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

rdmavt_mr.h (4038B)


      1/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
      2/*
      3 * Copyright(c) 2016 Intel Corporation.
      4 */
      5
      6#ifndef DEF_RDMAVT_INCMR_H
      7#define DEF_RDMAVT_INCMR_H
      8
      9/*
     10 * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
     11 * drivers no longer need access to the MR directly.
     12 */
     13#include <linux/percpu-refcount.h>
     14
     15/*
     16 * A segment is a linear region of low physical memory.
     17 * Used by the verbs layer.
     18 */
struct rvt_seg {
	void *vaddr;	/* kernel virtual address of this segment */
	size_t length;	/* length of the segment in bytes */
};
     23
/* The number of rvt_segs that fit in a page. */
#define RVT_SEGSZ     (PAGE_SIZE / sizeof(struct rvt_seg))

/* One page worth of segments; rvt_mregion.map[] entries point at these. */
struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};
     30
struct rvt_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;          /* total length of the region in bytes */
	u32 lkey;               /* local key for this region */
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;       /* memory access flags */
	u32 max_segs;           /* number of rvt_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	atomic_t lkey_invalid;	/* true if current lkey is invalid */
	u8  page_shift;         /* 0 - non unform/non powerof2 sizes */
	u8  lkey_published;     /* in global table */
	struct percpu_ref refcount; /* taken/dropped via rvt_get_mr()/rvt_put_mr() */
	struct completion comp; /* complete when refcount goes to zero */
	struct rvt_segarray *map[];    /* the segments */
};
     48
#define RVT_MAX_LKEY_TABLE_BITS 23

/*
 * Table of published rvt_mregion pointers, indexed by lkey/rkey.
 * Entries are looked up under RCU (note the __rcu annotation on table);
 * modifications are serialized by the lock below.
 */
struct rvt_lkey_table {
	/* read mostly fields */
	u32 max;                /* size of the table */
	u32 shift;              /* lkey/rkey shift */
	struct rvt_mregion __rcu **table;
	/* writeable fields */
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
};
     62
     63/*
     64 * These keep track of the copy progress within a memory region.
     65 * Used by the verbs layer.
     66 */
struct rvt_sge {
	struct rvt_mregion *mr; /* region this SGE walks */
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
     75
struct rvt_sge_state {
	struct rvt_sge *sg_list;      /* next SGE to be used if any */
	struct rvt_sge sge;   /* progress state for the current SGE */
	u32 total_len;        /* total length in bytes; untouched by the inline helpers here */
	u8 num_sge;           /* SGEs remaining, including the current one */
};
     82
/* Drop one reference on @mr (pairs with rvt_get_mr()). */
static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	percpu_ref_put(&mr->refcount);
}
     87
/* Take one reference on @mr (pairs with rvt_put_mr()). */
static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	percpu_ref_get(&mr->refcount);
}
     92
     93static inline void rvt_put_ss(struct rvt_sge_state *ss)
     94{
     95	while (ss->num_sge) {
     96		rvt_put_mr(ss->sge.mr);
     97		if (--ss->num_sge)
     98			ss->sge = *ss->sg_list++;
     99	}
    100}
    101
    102static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
    103{
    104	u32 len = sge->length;
    105
    106	if (len > length)
    107		len = length;
    108	if (len > sge->sge_length)
    109		len = sge->sge_length;
    110
    111	return len;
    112}
    113
/*
 * Advance the walk state in @ss by @length bytes (the caller has just
 * consumed that many bytes of the current SGE).
 *
 * @release: if true, drop the MR reference (rvt_put_mr()) once an SGE
 *           is fully consumed.
 *
 * When the current SGE is exhausted, the next one is loaded from
 * ss->sg_list; otherwise, when only the current segment is exhausted,
 * vaddr/length are reloaded from the next entry in mr->map[].
 * NOTE(review): the sge->mr->lkey check presumably skips segment
 * walking for the reserved lkey==0 (DMA) MR, which would have no
 * map[] to walk — confirm against the rdmavt MR code.
 */
static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length,
				  bool release)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* current SGE done: optionally release its MR, load the next */
		if (release)
			rvt_put_mr(sge->mr);
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		/* segment done: step to the next segment in the map arrays */
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
    137
    138static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
    139				bool release)
    140{
    141	struct rvt_sge *sge = &ss->sge;
    142
    143	while (length) {
    144		u32 len = rvt_get_sge_length(sge, length);
    145
    146		WARN_ON_ONCE(len == 0);
    147		rvt_update_sge(ss, len, release);
    148		length -= len;
    149	}
    150}
    151
/* Out-of-line lookups: presumably report whether @lkey is referenced
 * by the sge state / MR — confirm against the defining .c file. */
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);

#endif          /* DEF_RDMAVT_INCMR_H */