rtrs-srv.h (3764B)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#ifndef RTRS_SRV_H
#define RTRS_SRV_H

#include <linux/device.h>
#include <linux/refcount.h>
#include "rtrs-pri.h"

/*
 * enum rtrs_srv_state - Server states.
 */
enum rtrs_srv_state {
	RTRS_SRV_CONNECTING,
	RTRS_SRV_CONNECTED,
	RTRS_SRV_CLOSING,
	RTRS_SRV_CLOSED,
};

/*
 * Stats for read and write operations.
 * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
 */
struct rtrs_srv_stats_rdma_stats {
	struct {
		atomic64_t cnt;
		atomic64_t size_total;
	} dir[2];
};

struct rtrs_srv_stats {
	struct kobject kobj_stats;
	struct rtrs_srv_stats_rdma_stats rdma_stats;
	struct rtrs_srv_path *srv_path;
};

struct rtrs_srv_con {
	struct rtrs_con c;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;
};

/* IO context in rtrs_srv, each io has one */
struct rtrs_srv_op {
	struct rtrs_srv_con *con;
	u32 msg_id;
	u8 dir;
	struct rtrs_msg_rdma_read *rd_msg;
	struct ib_rdma_wr tx_wr;
	struct ib_sge tx_sg;
	struct list_head wait_list;
	int status;
};

/*
 * Server-side memory region context. When always_invalidate=Y, we need
 * queue_depth memory regions so that each one can be invalidated.
 */
struct rtrs_srv_mr {
	struct ib_mr *mr;
	struct sg_table sgt;
	struct ib_cqe inv_cqe;	/* only for always_invalidate=true */
	u32 msg_id;		/* only for always_invalidate=true */
	u32 msg_off;		/* only for always_invalidate=true */
	struct rtrs_iu *iu;	/* send buffer for new rkey msg */
};

struct rtrs_srv_path {
	struct rtrs_path s;
	struct rtrs_srv_sess *srv;
	struct work_struct close_work;
	enum rtrs_srv_state state;
	spinlock_t state_lock;
	int cur_cq_vector;
	struct rtrs_srv_op **ops_ids;
	struct percpu_ref ids_inflight_ref;
	struct completion complete_done;
	struct rtrs_srv_mr *mrs;
	unsigned int mrs_num;
	dma_addr_t *dma_addr;
	bool established;
	unsigned int mem_bits;
	struct kobject kobj;
	struct rtrs_srv_stats *stats;
};

struct rtrs_srv_sess {
	struct list_head paths_list;
	int paths_up;
	struct mutex paths_ev_mutex;
	size_t paths_num;
	struct mutex paths_mutex;
	uuid_t paths_uuid;
	refcount_t refcount;
	struct rtrs_srv_ctx *ctx;
	struct list_head ctx_list;
	void *priv;
	size_t queue_depth;
	struct page **chunks;
	struct device dev;
	unsigned int dev_ref;
	struct kobject *kobj_paths;
};

struct rtrs_srv_ctx {
	struct rtrs_srv_ops ops;
	struct rdma_cm_id *cm_id_ip;
	struct rdma_cm_id *cm_id_ib;
	struct mutex srv_mutex;
	struct list_head srv_list;
};

struct rtrs_srv_ib_ctx {
	struct rtrs_srv_ctx *srv_ctx;
	u16 port;
	struct mutex ib_dev_mutex;
	int ib_dev_count;
};

extern struct class *rtrs_dev_class;

void close_path(struct rtrs_srv_path *srv_path);

static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
					      size_t size, int d)
{
	atomic64_inc(&s->rdma_stats.dir[d].cnt);
	atomic64_add(size, &s->rdma_stats.dir[d].size_total);
}

/* functions which are implemented in rtrs-srv-stats.c */
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
				char *page, size_t len);

/* functions which are implemented in rtrs-srv-sysfs.c */
int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);

#endif /* RTRS_SRV_H */
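
Usage note (editorial, not part of the header): rtrs_srv_update_rdma_stats() is the helper that feeds the per-direction counters in struct rtrs_srv_stats_rdma_stats above. The sketch below shows how a caller in the server IO path might account a completed transfer; it assumes the dir index follows the usual kernel READ=0/WRITE=1 convention for the two dir[] slots, and the function name example_account_write and the variable data_len are illustrative only.

	/* Hypothetical caller: account one completed write of data_len bytes. */
	static void example_account_write(struct rtrs_srv_path *srv_path,
					  size_t data_len)
	{
		/* Bumps dir[WRITE].cnt and adds data_len to dir[WRITE].size_total. */
		rtrs_srv_update_rdma_stats(srv_path->stats, data_len, WRITE);
	}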