cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

wr.h (4063B)


/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};


/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, WQE construction, which repeatedly
 * advances the pointer to write the next piece of data, simply needs to
 * check whether it has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}
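
/* Illustrative sketch (not part of the original header): how a WQE builder
 * might pair a stride pointer with its edge before writing segments. The
 * helper name example_wqe_start() is hypothetical; only the calls it makes
 * come from this file.
 */
static inline void example_wqe_start(struct mlx5_ib_wq *sq, u32 idx,
				     void **seg, void **cur_edge)
{
	/* Address of the stride at @idx inside its fragment. */
	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
	/* First address past that fragment (or past the SQ). */
	*cur_edge = get_sq_edge(sq, idx);
	/* Writers advance *seg and must re-translate once it hits *cur_edge. */
}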

/* handle_post_send_edge - Check whether we have reached the SQ edge. If so,
 * advance to the next nearby edge and get a new address translation for the
 * current WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	if (likely(*seg != *cur_edge))
		return;

	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}
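
/* Illustrative sketch (an assumption, not taken from the driver): writing one
 * fixed-size segment at a time. After each segment the writer calls
 * handle_post_send_edge() so @seg is re-translated if it ran into the edge.
 * struct example_seg and example_write_seg() are hypothetical names; real
 * builders use the mlx5_wqe_*_seg types.
 */
struct example_seg {
	u8 data[16];
};

static inline void example_write_seg(struct mlx5_ib_wq *sq, void **seg,
				     u32 *wqe_sz, void **cur_edge,
				     const struct example_seg *src)
{
	memcpy(*seg, src, sizeof(*src));
	*seg += sizeof(*src);
	*wqe_sz += sizeof(*src) / 16;
	handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
}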

/* mlx5r_memcpy_send_wqe - copy data from @src to the WQE and update the
 * relevant WQ's pointers. At the end @seg is aligned to 16B regardless of the
 * copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
					 void **seg, u32 *wqe_sz,
					 const void *src, size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}
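
/* Illustrative sketch (an assumption, not the driver's code): appending an
 * arbitrary-length inline payload. Unlike the fixed-size example above, a
 * single mlx5r_memcpy_send_wqe() call splits the copy at fragment edges
 * itself and leaves @seg 16-byte aligned afterwards. example_copy_inline()
 * is a hypothetical name.
 */
static inline void example_copy_inline(struct mlx5_ib_wq *sq, void **cur_edge,
				       void **seg, u32 *wqe_sz,
				       const void *payload, size_t len)
{
	/* One call is enough; the helper loops over edges internally. */
	mlx5r_memcpy_send_wqe(sq, cur_edge, seg, wqe_sz, payload, len);
}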

int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		    struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
		    int *size, void **cur_edge, int nreq, __be32 general_id,
		    bool send_signaled, bool solicited);
void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
		      void *seg, u8 size, void *cur_edge, unsigned int idx,
		      u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
		   struct mlx5_wqe_ctrl_seg *ctrl);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);

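/* Illustrative sketch (an assumption; heavily simplified compared to the
 * driver's real mlx5_ib_post_send()): the shape of a post-send path built on
 * the declarations above. example_post_one() is a hypothetical helper, the
 * nreq, fence and opcode values are placeholders, and all locking and error
 * handling is omitted.
 */
static inline int example_post_one(struct mlx5_ib_qp *qp, u64 wr_id,
				   const void *inline_data, size_t len)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	void *seg, *cur_edge;
	unsigned int idx;
	u32 wqe_sz;
	int size, err;

	/* Reserve a WQE slot and get its first stride plus the current edge. */
	err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge,
			      0 /* nreq */, 0 /* general_id */,
			      true /* signaled */, false /* solicited */);
	if (err)
		return err;

	/* Build the payload after the ctrl segment, growing the size in 16B units. */
	wqe_sz = size;
	mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &wqe_sz,
			      inline_data, len);
	size = wqe_sz;

	/* Fill in the ctrl segment and advance the producer index. */
	mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr_id,
			 0 /* nreq */, 0 /* fence */, MLX5_OPCODE_SEND);

	/* Publish the work to hardware via the doorbell. */
	mlx5r_ring_db(qp, 1, ctrl);

	return 0;
}
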
static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
#endif /* _MLX5_IB_WR_H */