cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pvrdma.h (14369B)


/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"

/* NOT the same as BIT_MASK(). */
#define PVRDMA_MASK(n) (((n) << 1) - 1)
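
/*
 * Editor's example: for a mask-max value with a single high bit set,
 * PVRDMA_MASK() yields every bit up to and including it, e.g.
 * PVRDMA_MASK(0x8) == 0xf, so "flags & PVRDMA_MASK(MAX)" trims a
 * bitmask to the flags the device ABI knows about. BIT_MASK(n), by
 * contrast, expands to (1UL << ((n) % BITS_PER_LONG)), a single bit,
 * hence the warning above.
 */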

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820

#define PVRDMA_NUM_RING_PAGES		4
#define PVRDMA_QP_NUM_HEADER_PAGES	1

struct pvrdma_dev;

struct pvrdma_page_dir {
	dma_addr_t dir_dma;
	u64 *dir;
	int ntables;
	u64 **tables;
	u64 npages;
	void **pages;
};
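
/*
 * Editor's note, assuming 4 KiB pages: "dir" is a single DMA-mapped
 * page of u64 bus addresses pointing at the "tables" pages; each table
 * holds PAGE_SIZE / sizeof(u64) = 512 entries, one per data page. A
 * single directory page can therefore describe up to 512 * 512 =
 * 262144 pages (1 GiB) of memory shared with the device.
 */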

struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;
	spinlock_t cq_lock; /* Poll lock. */
	struct pvrdma_uar_map *uar;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring_state;
	struct pvrdma_page_dir pdir;
	u32 cq_handle;
	bool is_kernel;
	refcount_t refcnt;
	struct completion free;
};
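
/*
 * Editor's note: the refcnt/free pair implements the usual deferred
 * teardown idiom; the destroy path drops its reference and sleeps in
 * wait_for_completion(&cq->free), and whoever hits
 * refcount_dec_and_test() last calls complete(&cq->free). pvrdma_srq
 * and pvrdma_qp below use the same pattern.
 */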

struct pvrdma_id_table {
	u32 last;
	u32 top;
	u32 max;
	u32 mask;
	spinlock_t lock; /* Table lock. */
	unsigned long *table;
};

struct pvrdma_uar_map {
	unsigned long pfn;
	void __iomem *map;
	int index;
};

struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;
};

struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;
	struct pvrdma_uar_map uar;
	u64 ctx_handle;
};

struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u32 pd_handle;
	int privileged;
};

struct pvrdma_mr {
	u32 mr_handle;
	u64 iova;
	u64 size;
};

struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct pvrdma_mr mmr;
	struct pvrdma_page_dir pdir;
	u64 *pages;
	u32 npages;
	u32 max_pages;
	u32 page_shift;
};

struct pvrdma_wq {
	struct pvrdma_ring *ring;
	spinlock_t lock; /* Work queue lock. */
	int wqe_cnt;
	int wqe_size;
	int max_sg;
	int offset;
};

struct pvrdma_ah {
	struct ib_ah ibah;
	struct pvrdma_av av;
};

struct pvrdma_srq {
	struct ib_srq ibsrq;
	int offset;
	spinlock_t lock; /* SRQ lock. */
	int wqe_cnt;
	int wqe_size;
	int max_gs;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring;
	struct pvrdma_page_dir pdir;
	u32 srq_handle;
	int npages;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;
	u32 qkey;
	struct pvrdma_wq sq;
	struct pvrdma_wq rq;
	struct ib_umem *rumem;
	struct ib_umem *sumem;
	struct pvrdma_page_dir pdir;
	struct pvrdma_srq *srq;
	int npages;
	int npages_send;
	int npages_recv;
	u32 flags;
	u8 port;
	u8 state;
	bool is_kernel;
	struct mutex mutex; /* QP state mutex. */
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;
	struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
	dma_addr_t dsrbase; /* Shared region base address */
	void *cmd_slot;
	void *resp_slot;
	unsigned long flags;
	struct list_head device_link;
	unsigned int dsr_version;

	/* Locking and interrupt information. */
	spinlock_t cmd_lock; /* Command lock. */
	struct semaphore cmd_sema;
	struct completion cmd_done;
	unsigned int nr_vectors;

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;
	struct pvrdma_ring_state *async_ring_state;
	struct pvrdma_page_dir async_pdir;
	struct pvrdma_ring_state *cq_ring_state;
	struct pvrdma_page_dir cq_pdir;
	struct pvrdma_cq **cq_tbl;
	spinlock_t cq_tbl_lock;
	struct pvrdma_srq **srq_tbl;
	spinlock_t srq_tbl_lock;
	struct pvrdma_qp **qp_tbl;
	spinlock_t qp_tbl_lock;
	struct pvrdma_uar_table uar_table;
	struct pvrdma_uar_map driver_uar;
	__be64 sys_image_guid;
	spinlock_t desc_lock; /* Device modification lock. */
	u32 port_cap_mask;
	struct mutex port_mutex; /* Port modification mutex. */
	bool ib_active;
	atomic_t num_qps;
	atomic_t num_cqs;
	atomic_t num_srqs;
	atomic_t num_pds;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;
	struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;
	unsigned long event;
};

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct
pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}
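
/*
 * Editor's sketch of how the wrappers above are used; example_destroy_cq
 * is hypothetical, but every verbs callback follows this shape, receiving
 * the embedded ib_* object and recovering the driver structure around it:
 *
 *	static int example_destroy_cq(struct ib_cq *ibcq)
 *	{
 *		struct pvrdma_cq *cq = to_vcq(ibcq);
 *		struct pvrdma_dev *dev = to_vdev(ibcq->device);
 *		...
 *	}
 *
 * container_of() makes this valid because each ib_* member is embedded
 * in, not pointed to by, its pvrdma_* wrapper.
 */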

static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}
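
/*
 * Editor's note: the UAR ("user access region") writes above are
 * doorbells. The completion path, for instance, arms a CQ with
 * something like pvrdma_write_uar_cq(dev, cq->cq_handle |
 * PVRDMA_UAR_CQ_ARM); the handle/flag encodings live in
 * pvrdma_dev_api.h.
 */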

static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
					    u64 offset)
{
	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
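
/*
 * Editor's example: the byte offset splits into a page index and an
 * intra-page offset. Assuming 4 KiB pages, offset 0x2010 resolves to
 * byte 0x10 of pages[2].
 */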

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}
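
/*
 * Editor's note: these plain casts (and the similar ones below) assume
 * that the pvrdma ABI enums in rdma/vmw_pvrdma-abi.h mirror the numeric
 * values of their ib_verbs counterparts. Only namespaces whose values
 * diverge, such as the work-request opcodes further down, need an
 * explicit switch.
 */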

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
					enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
					enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
					enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
					enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
					enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
					enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
					enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
					enum pvrdma_mig_state state)
{
	return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}

static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_RDMA_WRITE:
		return PVRDMA_WR_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
	case IB_WR_SEND:
		return PVRDMA_WR_SEND;
	case IB_WR_SEND_WITH_IMM:
		return PVRDMA_WR_SEND_WITH_IMM;
	case IB_WR_RDMA_READ:
		return PVRDMA_WR_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
	case IB_WR_LSO:
		return PVRDMA_WR_LSO;
	case IB_WR_SEND_WITH_INV:
		return PVRDMA_WR_SEND_WITH_INV;
	case IB_WR_RDMA_READ_WITH_INV:
		return PVRDMA_WR_RDMA_READ_WITH_INV;
	case IB_WR_LOCAL_INV:
		return PVRDMA_WR_LOCAL_INV;
	case IB_WR_REG_MR:
		return PVRDMA_WR_FAST_REG_MR;
	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
	case IB_WR_REG_MR_INTEGRITY:
		return PVRDMA_WR_REG_SIG_MR;
	default:
		return PVRDMA_WR_ERROR;
	}
}
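
/*
 * Editor's note: callers are expected to reject PVRDMA_WR_ERROR rather
 * than hand an unmapped opcode to the device; the send path fails such
 * a work request with -EINVAL.
 */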

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
					enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_SEND;
	}
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}

static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
{
	switch (type) {
	case PVRDMA_NETWORK_ROCE_V1:
		return RDMA_NETWORK_ROCE_V1;
	case PVRDMA_NETWORK_IPV4:
		return RDMA_NETWORK_IPV4;
	case PVRDMA_NETWORK_IPV6:
		return RDMA_NETWORK_IPV6;
	default:
		return RDMA_NETWORK_IPV6;
	}
}

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned resp_code);

#endif /* __PVRDMA_H__ */