cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xprt.h (16233B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2/*
      3 *  linux/include/linux/sunrpc/xprt.h
      4 *
      5 *  Declarations for the RPC transport interface.
      6 *
      7 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
      8 */
      9
     10#ifndef _LINUX_SUNRPC_XPRT_H
     11#define _LINUX_SUNRPC_XPRT_H
     12
     13#include <linux/uio.h>
     14#include <linux/socket.h>
     15#include <linux/in.h>
     16#include <linux/ktime.h>
     17#include <linux/kref.h>
     18#include <linux/sunrpc/sched.h>
     19#include <linux/sunrpc/xdr.h>
     20#include <linux/sunrpc/msg_prot.h>
     21
     22#define RPC_MIN_SLOT_TABLE	(2U)
     23#define RPC_DEF_SLOT_TABLE	(16U)
     24#define RPC_MAX_SLOT_TABLE_LIMIT	(65536U)
     25#define RPC_MAX_SLOT_TABLE	RPC_MAX_SLOT_TABLE_LIMIT
     26
     27#define RPC_CWNDSHIFT		(8U)
     28#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
     29#define RPC_INITCWND		RPC_CWNDSCALE
     30#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
     31#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
     32
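/*
 * A worked example of the congestion window scaling above, assuming a
 * transport with max_reqs = 16 slots: RPC_CWNDSCALE = 1 << 8 = 256, so each
 * request in flight accounts for 256 units of congestion and RPC_INITCWND
 * admits exactly one request, while
 *
 *	RPC_MAXCWND(xprt) = 16 << 8 = 4096	(at most 16 requests)
 *
 * RPCXPRT_CONGESTED() then reports congestion once xprt->cong has grown to
 * meet xprt->cwnd, e.g. a window of 512 admits two requests at a time.
 */
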
     33/*
     34 * This describes a timeout strategy
     35 */
     36struct rpc_timeout {
     37	unsigned long		to_initval,		/* initial timeout */
     38				to_maxval,		/* max timeout */
     39				to_increment;		/* if !exponential */
     40	unsigned int		to_retries;		/* max # of retries */
     41	unsigned char		to_exponential;
     42};
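
/*
 * Sketch of how a timeout schedule plays out, assuming the usual behaviour
 * of xprt_adjust_timeout() (declared later in this file): the per-request
 * timeout starts at to_initval and, after each retry, is doubled when
 * to_exponential is set or bumped by to_increment otherwise, capped at
 * to_maxval.  For example, to_initval = 3 s, to_maxval = 180 s and
 * to_exponential = 1 give roughly 3 s, 6 s, 12 s, 24 s, ... 180 s; with
 * to_exponential = 0 and to_increment = 5 s the steps are 3 s, 8 s, 13 s, ...
 */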
     43
     44enum rpc_display_format_t {
     45	RPC_DISPLAY_ADDR = 0,
     46	RPC_DISPLAY_PORT,
     47	RPC_DISPLAY_PROTO,
     48	RPC_DISPLAY_HEX_ADDR,
     49	RPC_DISPLAY_HEX_PORT,
     50	RPC_DISPLAY_NETID,
     51	RPC_DISPLAY_MAX,
     52};
     53
     54struct rpc_task;
     55struct rpc_xprt;
     56struct xprt_class;
     57struct seq_file;
     58struct svc_serv;
     59struct net;
     60
     61/*
     62 * This describes a complete RPC request
     63 */
     64struct rpc_rqst {
     65	/*
     66	 * This is the user-visible part
     67	 */
     68	struct rpc_xprt *	rq_xprt;		/* RPC client */
     69	struct xdr_buf		rq_snd_buf;		/* send buffer */
     70	struct xdr_buf		rq_rcv_buf;		/* recv buffer */
     71
     72	/*
     73	 * This is the private part
     74	 */
     75	struct rpc_task *	rq_task;	/* RPC task data */
     76	struct rpc_cred *	rq_cred;	/* Bound cred */
     77	__be32			rq_xid;		/* request XID */
     78	int			rq_cong;	/* has incremented xprt->cong */
     79	u32			rq_seqno;	/* gss seq no. used on req. */
     80	int			rq_enc_pages_num;
     81	struct page		**rq_enc_pages;	/* scratch pages for use by
     82						   gss privacy code */
     83	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
     84
     85	union {
     86		struct list_head	rq_list;	/* Slot allocation list */
     87		struct rb_node		rq_recv;	/* Receive queue */
     88	};
     89
     90	struct list_head	rq_xmit;	/* Send queue */
     91	struct list_head	rq_xmit2;	/* Send queue */
     92
     93	void			*rq_buffer;	/* Call XDR encode buffer */
     94	size_t			rq_callsize;
     95	void			*rq_rbuffer;	/* Reply XDR decode buffer */
     96	size_t			rq_rcvsize;
     97	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
     98	size_t			rq_reply_bytes_recvd;	/* total reply bytes */
     99							/* received */
    100
    101	struct xdr_buf		rq_private_buf;		/* The receive buffer
    102							 * used in the softirq.
    103							 */
    104	unsigned long		rq_majortimeo;	/* major timeout alarm */
    105	unsigned long		rq_minortimeo;	/* minor timeout alarm */
    106	unsigned long		rq_timeout;	/* Current timeout value */
    107	ktime_t			rq_rtt;		/* round-trip time */
    108	unsigned int		rq_retries;	/* # of retries */
    109	unsigned int		rq_connect_cookie;
    110						/* A cookie used to track the
    111						   state of the transport
    112						   connection */
    113	atomic_t		rq_pin;
    114	
    115	/*
    116	 * Partial send handling
    117	 */
    118	u32			rq_bytes_sent;	/* Bytes we have sent */
    119
    120	ktime_t			rq_xtime;	/* transmit time stamp */
    121	int			rq_ntrans;
    122
    123#if defined(CONFIG_SUNRPC_BACKCHANNEL)
    124	struct list_head	rq_bc_list;	/* Callback service list */
    125	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
    126	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
     127#endif /* CONFIG_SUNRPC_BACKCHANNEL */
    128};
    129#define rq_svec			rq_snd_buf.head
    130#define rq_slen			rq_snd_buf.len
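
/*
 * The two shorthands above just alias into the send buffer; for a request
 * "req" they expand to:
 *
 *	req->rq_svec	->	req->rq_snd_buf.head	(head kvec of the call)
 *	req->rq_slen	->	req->rq_snd_buf.len	(encoded call length)
 */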
    131
    132struct rpc_xprt_ops {
    133	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
    134	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
    135	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
    136	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
    137	void		(*free_slot)(struct rpc_xprt *xprt,
    138				     struct rpc_rqst *req);
    139	void		(*rpcbind)(struct rpc_task *task);
    140	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
    141	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
    142	int		(*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
    143				       size_t buflen);
    144	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
    145	int		(*buf_alloc)(struct rpc_task *task);
    146	void		(*buf_free)(struct rpc_task *task);
    147	int		(*prepare_request)(struct rpc_rqst *req);
    148	int		(*send_request)(struct rpc_rqst *req);
    149	void		(*wait_for_reply_request)(struct rpc_task *task);
    150	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
    151	void		(*release_request)(struct rpc_task *task);
    152	void		(*close)(struct rpc_xprt *xprt);
    153	void		(*destroy)(struct rpc_xprt *xprt);
    154	void		(*set_connect_timeout)(struct rpc_xprt *xprt,
    155					unsigned long connect_timeout,
    156					unsigned long reconnect_timeout);
    157	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
    158	int		(*enable_swap)(struct rpc_xprt *xprt);
    159	void		(*disable_swap)(struct rpc_xprt *xprt);
    160	void		(*inject_disconnect)(struct rpc_xprt *xprt);
    161	int		(*bc_setup)(struct rpc_xprt *xprt,
    162				    unsigned int min_reqs);
    163	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
    164	unsigned int	(*bc_num_slots)(struct rpc_xprt *xprt);
    165	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
    166	void		(*bc_destroy)(struct rpc_xprt *xprt,
    167				      unsigned int max_reqs);
    168};
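
/*
 * A minimal sketch of an ops table, patterned after the socket transport in
 * net/sunrpc/xprtsock.c; the example_* callbacks are placeholders, but the
 * generic helpers are real and declared later in this file:
 *
 *	static const struct rpc_xprt_ops example_tcp_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt,
 *		.release_xprt		= xprt_release_xprt,
 *		.alloc_slot		= xprt_alloc_slot,
 *		.free_slot		= xprt_free_slot,
 *		.prepare_request	= example_prepare_request,
 *		.send_request		= example_send_request,
 *		.wait_for_reply_request	= xprt_wait_for_reply_request_def,
 *		.close			= example_close,
 *		.destroy		= example_destroy,
 *	};
 *
 * A transport instance then points its rpc_xprt::ops at such a table.
 */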
    169
    170/*
    171 * RPC transport identifiers
    172 *
    173 * To preserve compatibility with the historical use of raw IP protocol
     174 * IDs for transport selection, UDP and TCP identifiers are specified
    175 * with the previous values. No such restriction exists for new transports,
    176 * except that they may not collide with these values (17 and 6,
    177 * respectively).
    178 */
    179#define XPRT_TRANSPORT_BC       (1 << 31)
    180enum xprt_transports {
    181	XPRT_TRANSPORT_UDP	= IPPROTO_UDP,
    182	XPRT_TRANSPORT_TCP	= IPPROTO_TCP,
    183	XPRT_TRANSPORT_BC_TCP	= IPPROTO_TCP | XPRT_TRANSPORT_BC,
    184	XPRT_TRANSPORT_RDMA	= 256,
    185	XPRT_TRANSPORT_BC_RDMA	= XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
    186	XPRT_TRANSPORT_LOCAL	= 257,
    187};
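
/*
 * A backchannel identifier is simply the base transport value with the
 * XPRT_TRANSPORT_BC flag (bit 31) set, e.g.
 *
 *	XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | (1 << 31) = 6 | 0x80000000
 *
 * so masking off XPRT_TRANSPORT_BC recovers the underlying transport type.
 */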
    188
    189struct rpc_sysfs_xprt;
    190struct rpc_xprt {
    191	struct kref		kref;		/* Reference count */
    192	const struct rpc_xprt_ops *ops;		/* transport methods */
    193	unsigned int		id;		/* transport id */
    194
    195	const struct rpc_timeout *timeout;	/* timeout parms */
    196	struct sockaddr_storage	addr;		/* server address */
    197	size_t			addrlen;	/* size of server address */
    198	int			prot;		/* IP protocol */
    199
    200	unsigned long		cong;		/* current congestion */
    201	unsigned long		cwnd;		/* congestion window */
    202
    203	size_t			max_payload;	/* largest RPC payload size,
    204						   in bytes */
    205
    206	struct rpc_wait_queue	binding;	/* requests waiting on rpcbind */
    207	struct rpc_wait_queue	sending;	/* requests waiting to send */
    208	struct rpc_wait_queue	pending;	/* requests in flight */
    209	struct rpc_wait_queue	backlog;	/* waiting for slot */
    210	struct list_head	free;		/* free slots */
    211	unsigned int		max_reqs;	/* max number of slots */
    212	unsigned int		min_reqs;	/* min number of slots */
    213	unsigned int		num_reqs;	/* total slots */
    214	unsigned long		state;		/* transport state */
    215	unsigned char		resvport   : 1,	/* use a reserved port */
    216				reuseport  : 1; /* reuse port on reconnect */
    217	atomic_t		swapper;	/* we're swapping over this
    218						   transport */
    219	unsigned int		bind_index;	/* bind function index */
    220
    221	/*
    222	 * Multipath
    223	 */
    224	struct list_head	xprt_switch;
    225
    226	/*
    227	 * Connection of transports
    228	 */
    229	unsigned long		bind_timeout,
    230				reestablish_timeout;
    231	unsigned int		connect_cookie;	/* A cookie that gets bumped
    232						   every time the transport
    233						   is reconnected */
    234
    235	/*
    236	 * Disconnection of idle transports
    237	 */
    238	struct work_struct	task_cleanup;
    239	struct timer_list	timer;
    240	unsigned long		last_used,
    241				idle_timeout,
    242				connect_timeout,
    243				max_reconnect_timeout;
    244
    245	/*
    246	 * Send stuff
    247	 */
    248	atomic_long_t		queuelen;
    249	spinlock_t		transport_lock;	/* lock transport info */
    250	spinlock_t		reserve_lock;	/* lock slot table */
    251	spinlock_t		queue_lock;	/* send/receive queue lock */
    252	u32			xid;		/* Next XID value to use */
    253	struct rpc_task *	snd_task;	/* Task blocked in send */
    254
    255	struct list_head	xmit_queue;	/* Send queue */
    256	atomic_long_t		xmit_queuelen;
    257
    258	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
    259#if defined(CONFIG_SUNRPC_BACKCHANNEL)
    260	struct svc_serv		*bc_serv;       /* The RPC service which will */
    261						/* process the callback */
    262	unsigned int		bc_alloc_max;
    263	unsigned int		bc_alloc_count;	/* Total number of preallocs */
    264	atomic_t		bc_slot_count;	/* Number of allocated slots */
    265	spinlock_t		bc_pa_lock;	/* Protects the preallocated
    266						 * items */
    267	struct list_head	bc_pa_list;	/* List of preallocated
    268						 * backchannel rpc_rqst's */
    269#endif /* CONFIG_SUNRPC_BACKCHANNEL */
    270
    271	struct rb_root		recv_queue;	/* Receive queue */
    272
    273	struct {
    274		unsigned long		bind_count,	/* total number of binds */
    275					connect_count,	/* total number of connects */
    276					connect_start,	/* connect start timestamp */
    277					connect_time,	/* jiffies waiting for connect */
     278					sends,		/* how many complete requests sent */
     279					recvs,		/* how many complete replies received */
    280					bad_xids,	/* lookup_rqst didn't find XID */
    281					max_slots;	/* max rpc_slots used */
    282
    283		unsigned long long	req_u,		/* average requests on the wire */
    284					bklog_u,	/* backlog queue utilization */
    285					sending_u,	/* send q utilization */
    286					pending_u;	/* pend q utilization */
    287	} stat;
    288
    289	struct net		*xprt_net;
    290	netns_tracker		ns_tracker;
    291	const char		*servername;
    292	const char		*address_strings[RPC_DISPLAY_MAX];
    293#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
    294	struct dentry		*debugfs;		/* debugfs directory */
    295#endif
    296	struct rcu_head		rcu;
    297	const struct xprt_class	*xprt_class;
    298	struct rpc_sysfs_xprt	*xprt_sysfs;
     299	bool			main; /* mark if this is the 1st transport */
    300};
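
/*
 * Transport lifetime is managed through the kref above.  A sketch of the
 * usual usage via xprt_get()/xprt_put(), declared later in this file
 * (use_transport() is a placeholder): xprt_get() takes a reference only if
 * the transport is still alive and returns NULL otherwise.
 *
 *	struct rpc_xprt *xprt = xprt_get(candidate);
 *
 *	if (xprt) {
 *		use_transport(xprt);
 *		xprt_put(xprt);
 *	}
 */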
    301
    302#if defined(CONFIG_SUNRPC_BACKCHANNEL)
    303/*
    304 * Backchannel flags
    305 */
    306#define	RPC_BC_PA_IN_USE	0x0001		/* Preallocated backchannel */
    307						/* buffer in use */
    308#endif /* CONFIG_SUNRPC_BACKCHANNEL */
    309
    310#if defined(CONFIG_SUNRPC_BACKCHANNEL)
    311static inline int bc_prealloc(struct rpc_rqst *req)
    312{
    313	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
    314}
    315#else
    316static inline int bc_prealloc(struct rpc_rqst *req)
    317{
    318	return 0;
    319}
    320#endif /* CONFIG_SUNRPC_BACKCHANNEL */
    321
    322#define XPRT_CREATE_INFINITE_SLOTS	(1U)
    323#define XPRT_CREATE_NO_IDLE_TIMEOUT	(1U << 1)
    324
    325struct xprt_create {
    326	int			ident;		/* XPRT_TRANSPORT identifier */
    327	struct net *		net;
    328	struct sockaddr *	srcaddr;	/* optional local address */
    329	struct sockaddr *	dstaddr;	/* remote peer address */
    330	size_t			addrlen;
    331	const char		*servername;
    332	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
    333	struct rpc_xprt_switch	*bc_xps;
    334	unsigned int		flags;
    335};
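
/*
 * Callers fill an xprt_create on the stack and pass it to
 * xprt_create_transport(), which dispatches on ->ident to the matching
 * xprt_class::setup.  A sketch with hypothetical values ("server_sin" is a
 * placeholder sockaddr_in):
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= &init_net,
 *		.dstaddr	= (struct sockaddr *)&server_sin,
 *		.addrlen	= sizeof(server_sin),
 *		.servername	= "server.example",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 */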
    336
    337struct xprt_class {
    338	struct list_head	list;
    339	int			ident;		/* XPRT_TRANSPORT identifier */
    340	struct rpc_xprt *	(*setup)(struct xprt_create *);
    341	struct module		*owner;
    342	char			name[32];
    343	const char *		netid[];
    344};
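
/*
 * A sketch of how a transport module advertises itself, patterned after the
 * socket transports in net/sunrpc/xprtsock.c (the example_* names are
 * placeholders):
 *
 *	static struct xprt_class example_transport = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_setup,
 *		.netid	= { "tcp", "tcp6", "" },
 *	};
 *
 * The module registers it with xprt_register_transport(&example_transport)
 * on load and removes it with xprt_unregister_transport() on unload, after
 * which xprt_create_transport() can find it by ident or netid.
 */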
    345
    346/*
    347 * Generic internal transport functions
    348 */
    349struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
    350void			xprt_connect(struct rpc_task *task);
    351unsigned long		xprt_reconnect_delay(const struct rpc_xprt *xprt);
    352void			xprt_reconnect_backoff(struct rpc_xprt *xprt,
    353					       unsigned long init_to);
    354void			xprt_reserve(struct rpc_task *task);
    355void			xprt_retry_reserve(struct rpc_task *task);
    356int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
    357int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
    358void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
    359void			xprt_free_slot(struct rpc_xprt *xprt,
    360				       struct rpc_rqst *req);
    361bool			xprt_prepare_transmit(struct rpc_task *task);
    362void			xprt_request_enqueue_transmit(struct rpc_task *task);
    363int			xprt_request_enqueue_receive(struct rpc_task *task);
    364void			xprt_request_wait_receive(struct rpc_task *task);
    365void			xprt_request_dequeue_xprt(struct rpc_task *task);
    366bool			xprt_request_need_retransmit(struct rpc_task *task);
    367void			xprt_transmit(struct rpc_task *task);
    368void			xprt_end_transmit(struct rpc_task *task);
    369int			xprt_adjust_timeout(struct rpc_rqst *req);
    370void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
    371void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
    372void			xprt_release(struct rpc_task *task);
    373struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
    374void			xprt_put(struct rpc_xprt *xprt);
    375struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
    376				unsigned int num_prealloc,
    377				unsigned int max_req);
    378void			xprt_free(struct rpc_xprt *);
    379void			xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
    380bool			xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
    381void			xprt_cleanup_ids(void);
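
/*
 * Roughly the order in which the RPC client state machine drives these
 * entry points for a single request (backlog waits, retransmits and error
 * handling omitted; the exact call sites live in net/sunrpc/clnt.c and
 * net/sunrpc/xprt.c):
 *
 *	xprt_reserve(task);			// obtain an rpc_rqst slot
 *	xprt_request_enqueue_receive(task);	// arm reply matching by XID
 *	xprt_request_enqueue_transmit(task);	// queue on xprt->xmit_queue
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);		// ->send_request() per rqst
 *	xprt_end_transmit(task);		// drop the transmit lock
 *	xprt_request_wait_receive(task);	// sleep until reply or timeout
 *	xprt_release(task);			// return the slot
 */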
    382
    383static inline int
    384xprt_enable_swap(struct rpc_xprt *xprt)
    385{
    386	return xprt->ops->enable_swap(xprt);
    387}
    388
    389static inline void
    390xprt_disable_swap(struct rpc_xprt *xprt)
    391{
    392	xprt->ops->disable_swap(xprt);
    393}
    394
    395/*
    396 * Transport switch helper functions
    397 */
    398int			xprt_register_transport(struct xprt_class *type);
    399int			xprt_unregister_transport(struct xprt_class *type);
    400int			xprt_find_transport_ident(const char *);
    401void			xprt_wait_for_reply_request_def(struct rpc_task *task);
    402void			xprt_wait_for_reply_request_rtt(struct rpc_task *task);
    403void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
    404void			xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
    405bool			xprt_write_space(struct rpc_xprt *xprt);
    406void			xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
    407struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
    408void			xprt_update_rtt(struct rpc_task *task);
    409void			xprt_complete_rqst(struct rpc_task *task, int copied);
    410void			xprt_pin_rqst(struct rpc_rqst *req);
    411void			xprt_unpin_rqst(struct rpc_rqst *req);
    412void			xprt_release_rqst_cong(struct rpc_task *task);
    413bool			xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
    414void			xprt_disconnect_done(struct rpc_xprt *xprt);
    415void			xprt_force_disconnect(struct rpc_xprt *xprt);
    416void			xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
    417
    418bool			xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
    419void			xprt_unlock_connect(struct rpc_xprt *, void *);
    420void			xprt_release_write(struct rpc_xprt *, struct rpc_task *);
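
/*
 * The reply handling helpers above are typically combined in a transport's
 * receive path roughly as follows (the data-copy step is elided and
 * "copied" stands for the number of reply bytes received):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *
 *		... copy the reply into req->rq_rcv_buf ...
 *
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */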
    421
    422/*
    423 * Reserved bit positions in xprt->state
    424 */
    425#define XPRT_LOCKED		(0)
    426#define XPRT_CONNECTED		(1)
    427#define XPRT_CONNECTING		(2)
    428#define XPRT_CLOSE_WAIT		(3)
    429#define XPRT_BOUND		(4)
    430#define XPRT_BINDING		(5)
    431#define XPRT_CLOSING		(6)
    432#define XPRT_OFFLINE		(7)
    433#define XPRT_REMOVE		(8)
    434#define XPRT_CONGESTED		(9)
    435#define XPRT_CWND_WAIT		(10)
    436#define XPRT_WRITE_SPACE	(11)
    437#define XPRT_SND_IS_COOKIE	(12)
    438
    439static inline void xprt_set_connected(struct rpc_xprt *xprt)
    440{
    441	set_bit(XPRT_CONNECTED, &xprt->state);
    442}
    443
    444static inline void xprt_clear_connected(struct rpc_xprt *xprt)
    445{
    446	clear_bit(XPRT_CONNECTED, &xprt->state);
    447}
    448
    449static inline int xprt_connected(struct rpc_xprt *xprt)
    450{
    451	return test_bit(XPRT_CONNECTED, &xprt->state);
    452}
    453
    454static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
    455{
    456	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
    457}
    458
    459static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
    460{
    461	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
    462}
    463
    464static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
    465{
    466	smp_mb__before_atomic();
    467	clear_bit(XPRT_CONNECTING, &xprt->state);
    468	smp_mb__after_atomic();
    469}
    470
    471static inline int xprt_connecting(struct rpc_xprt *xprt)
    472{
    473	return test_bit(XPRT_CONNECTING, &xprt->state);
    474}
    475
    476static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
    477{
    478	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
    479}
    480
    481static inline void xprt_set_bound(struct rpc_xprt *xprt)
    482{
    483	test_and_set_bit(XPRT_BOUND, &xprt->state);
    484}
    485
    486static inline int xprt_bound(struct rpc_xprt *xprt)
    487{
    488	return test_bit(XPRT_BOUND, &xprt->state);
    489}
    490
    491static inline void xprt_clear_bound(struct rpc_xprt *xprt)
    492{
    493	clear_bit(XPRT_BOUND, &xprt->state);
    494}
    495
    496static inline void xprt_clear_binding(struct rpc_xprt *xprt)
    497{
    498	smp_mb__before_atomic();
    499	clear_bit(XPRT_BINDING, &xprt->state);
    500	smp_mb__after_atomic();
    501}
    502
    503static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
    504{
    505	return test_and_set_bit(XPRT_BINDING, &xprt->state);
    506}
    507
    508#endif /* _LINUX_SUNRPC_XPRT_H */