mana.h (12233B)
1/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2/* Copyright (c) 2021, Microsoft Corporation. */ 3 4#ifndef _MANA_H 5#define _MANA_H 6 7#include "gdma.h" 8#include "hw_channel.h" 9 10/* Microsoft Azure Network Adapter (MANA)'s definitions 11 * 12 * Structures labeled with "HW DATA" are exchanged with the hardware. All of 13 * them are naturally aligned and hence don't need __packed. 14 */ 15 16/* MANA protocol version */ 17#define MANA_MAJOR_VERSION 0 18#define MANA_MINOR_VERSION 1 19#define MANA_MICRO_VERSION 1 20 21typedef u64 mana_handle_t; 22#define INVALID_MANA_HANDLE ((mana_handle_t)-1) 23 24enum TRI_STATE { 25 TRI_STATE_UNKNOWN = -1, 26 TRI_STATE_FALSE = 0, 27 TRI_STATE_TRUE = 1 28}; 29 30/* Number of entries for hardware indirection table must be in power of 2 */ 31#define MANA_INDIRECT_TABLE_SIZE 64 32#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) 33 34/* The Toeplitz hash key's length in bytes: should be multiple of 8 */ 35#define MANA_HASH_KEY_SIZE 40 36 37#define COMP_ENTRY_SIZE 64 38 39#define ADAPTER_MTU_SIZE 1500 40#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) 41 42#define RX_BUFFERS_PER_QUEUE 512 43 44#define MAX_SEND_BUFFERS_PER_QUEUE 256 45 46#define EQ_SIZE (8 * PAGE_SIZE) 47#define LOG2_EQ_THROTTLE 3 48 49#define MAX_PORTS_IN_MANA_DEV 256 50 51struct mana_stats_rx { 52 u64 packets; 53 u64 bytes; 54 u64 xdp_drop; 55 u64 xdp_tx; 56 struct u64_stats_sync syncp; 57}; 58 59struct mana_stats_tx { 60 u64 packets; 61 u64 bytes; 62 struct u64_stats_sync syncp; 63}; 64 65struct mana_txq { 66 struct gdma_queue *gdma_sq; 67 68 union { 69 u32 gdma_txq_id; 70 struct { 71 u32 reserved1 : 10; 72 u32 vsq_frame : 14; 73 u32 reserved2 : 8; 74 }; 75 }; 76 77 u16 vp_offset; 78 79 struct net_device *ndev; 80 81 /* The SKBs are sent to the HW and we are waiting for the CQEs. */ 82 struct sk_buff_head pending_skbs; 83 struct netdev_queue *net_txq; 84 85 atomic_t pending_sends; 86 87 struct mana_stats_tx stats; 88}; 89 90/* skb data and frags dma mappings */ 91struct mana_skb_head { 92 dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; 93 94 u32 size[MAX_SKB_FRAGS + 1]; 95}; 96 97#define MANA_HEADROOM sizeof(struct mana_skb_head) 98 99enum mana_tx_pkt_format { 100 MANA_SHORT_PKT_FMT = 0, 101 MANA_LONG_PKT_FMT = 1, 102}; 103 104struct mana_tx_short_oob { 105 u32 pkt_fmt : 2; 106 u32 is_outer_ipv4 : 1; 107 u32 is_outer_ipv6 : 1; 108 u32 comp_iphdr_csum : 1; 109 u32 comp_tcp_csum : 1; 110 u32 comp_udp_csum : 1; 111 u32 supress_txcqe_gen : 1; 112 u32 vcq_num : 24; 113 114 u32 trans_off : 10; /* Transport header offset */ 115 u32 vsq_frame : 14; 116 u32 short_vp_offset : 8; 117}; /* HW DATA */ 118 119struct mana_tx_long_oob { 120 u32 is_encap : 1; 121 u32 inner_is_ipv6 : 1; 122 u32 inner_tcp_opt : 1; 123 u32 inject_vlan_pri_tag : 1; 124 u32 reserved1 : 12; 125 u32 pcp : 3; /* 802.1Q */ 126 u32 dei : 1; /* 802.1Q */ 127 u32 vlan_id : 12; /* 802.1Q */ 128 129 u32 inner_frame_offset : 10; 130 u32 inner_ip_rel_offset : 6; 131 u32 long_vp_offset : 12; 132 u32 reserved2 : 4; 133 134 u32 reserved3; 135 u32 reserved4; 136}; /* HW DATA */ 137 138struct mana_tx_oob { 139 struct mana_tx_short_oob s_oob; 140 struct mana_tx_long_oob l_oob; 141}; /* HW DATA */ 142 143enum mana_cq_type { 144 MANA_CQ_TYPE_RX, 145 MANA_CQ_TYPE_TX, 146}; 147 148enum mana_cqe_type { 149 CQE_INVALID = 0, 150 CQE_RX_OKAY = 1, 151 CQE_RX_COALESCED_4 = 2, 152 CQE_RX_OBJECT_FENCE = 3, 153 CQE_RX_TRUNCATED = 4, 154 155 CQE_TX_OKAY = 32, 156 CQE_TX_SA_DROP = 33, 157 CQE_TX_MTU_DROP = 34, 158 CQE_TX_INVALID_OOB = 35, 159 CQE_TX_INVALID_ETH_TYPE = 36, 160 CQE_TX_HDR_PROCESSING_ERROR = 37, 161 CQE_TX_VF_DISABLED = 38, 162 CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, 163 CQE_TX_VPORT_DISABLED = 40, 164 CQE_TX_VLAN_TAGGING_VIOLATION = 41, 165}; 166 167#define MANA_CQE_COMPLETION 1 168 169struct mana_cqe_header { 170 u32 cqe_type : 6; 171 u32 client_type : 2; 172 u32 vendor_err : 24; 173}; /* HW DATA */ 174 175/* NDIS HASH Types */ 176#define NDIS_HASH_IPV4 BIT(0) 177#define NDIS_HASH_TCP_IPV4 BIT(1) 178#define NDIS_HASH_UDP_IPV4 BIT(2) 179#define NDIS_HASH_IPV6 BIT(3) 180#define NDIS_HASH_TCP_IPV6 BIT(4) 181#define NDIS_HASH_UDP_IPV6 BIT(5) 182#define NDIS_HASH_IPV6_EX BIT(6) 183#define NDIS_HASH_TCP_IPV6_EX BIT(7) 184#define NDIS_HASH_UDP_IPV6_EX BIT(8) 185 186#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) 187#define MANA_HASH_L4 \ 188 (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ 189 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) 190 191struct mana_rxcomp_perpkt_info { 192 u32 pkt_len : 16; 193 u32 reserved1 : 16; 194 u32 reserved2; 195 u32 pkt_hash; 196}; /* HW DATA */ 197 198#define MANA_RXCOMP_OOB_NUM_PPI 4 199 200/* Receive completion OOB */ 201struct mana_rxcomp_oob { 202 struct mana_cqe_header cqe_hdr; 203 204 u32 rx_vlan_id : 12; 205 u32 rx_vlantag_present : 1; 206 u32 rx_outer_iphdr_csum_succeed : 1; 207 u32 rx_outer_iphdr_csum_fail : 1; 208 u32 reserved1 : 1; 209 u32 rx_hashtype : 9; 210 u32 rx_iphdr_csum_succeed : 1; 211 u32 rx_iphdr_csum_fail : 1; 212 u32 rx_tcp_csum_succeed : 1; 213 u32 rx_tcp_csum_fail : 1; 214 u32 rx_udp_csum_succeed : 1; 215 u32 rx_udp_csum_fail : 1; 216 u32 reserved2 : 1; 217 218 struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; 219 220 u32 rx_wqe_offset; 221}; /* HW DATA */ 222 223struct mana_tx_comp_oob { 224 struct mana_cqe_header cqe_hdr; 225 226 u32 tx_data_offset; 227 228 u32 tx_sgl_offset : 5; 229 u32 tx_wqe_offset : 27; 230 231 u32 reserved[12]; 232}; /* HW DATA */ 233 234struct mana_rxq; 235 236#define CQE_POLLING_BUFFER 512 237 238struct mana_cq { 239 struct gdma_queue *gdma_cq; 240 241 /* Cache the CQ id (used to verify if each CQE comes to the right CQ. */ 242 u32 gdma_id; 243 244 /* Type of the CQ: TX or RX */ 245 enum mana_cq_type type; 246 247 /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. 248 * Only and must be non-NULL if type is MANA_CQ_TYPE_RX. 249 */ 250 struct mana_rxq *rxq; 251 252 /* Pointer to the mana_txq that is pushing TX CQEs to the queue. 253 * Only and must be non-NULL if type is MANA_CQ_TYPE_TX. 254 */ 255 struct mana_txq *txq; 256 257 /* Buffer which the CQ handler can copy the CQE's into. */ 258 struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; 259 260 /* NAPI data */ 261 struct napi_struct napi; 262 int work_done; 263 int budget; 264}; 265 266#define GDMA_MAX_RQE_SGES 15 267 268struct mana_recv_buf_oob { 269 /* A valid GDMA work request representing the data buffer. */ 270 struct gdma_wqe_request wqe_req; 271 272 void *buf_va; 273 dma_addr_t buf_dma_addr; 274 275 /* SGL of the buffer going to be sent has part of the work request. */ 276 u32 num_sge; 277 struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; 278 279 /* Required to store the result of mana_gd_post_work_request. 280 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the 281 * work queue when the WQE is consumed. 282 */ 283 struct gdma_posted_wqe_info wqe_inf; 284}; 285 286struct mana_rxq { 287 struct gdma_queue *gdma_rq; 288 /* Cache the gdma receive queue id */ 289 u32 gdma_id; 290 291 /* Index of RQ in the vPort, not gdma receive queue id */ 292 u32 rxq_idx; 293 294 u32 datasize; 295 296 mana_handle_t rxobj; 297 298 struct mana_cq rx_cq; 299 300 struct completion fence_event; 301 302 struct net_device *ndev; 303 304 /* Total number of receive buffers to be allocated */ 305 u32 num_rx_buf; 306 307 u32 buf_index; 308 309 struct mana_stats_rx stats; 310 311 struct bpf_prog __rcu *bpf_prog; 312 struct xdp_rxq_info xdp_rxq; 313 struct page *xdp_save_page; 314 315 /* MUST BE THE LAST MEMBER: 316 * Each receive buffer has an associated mana_recv_buf_oob. 317 */ 318 struct mana_recv_buf_oob rx_oobs[]; 319}; 320 321struct mana_tx_qp { 322 struct mana_txq txq; 323 324 struct mana_cq tx_cq; 325 326 mana_handle_t tx_object; 327}; 328 329struct mana_ethtool_stats { 330 u64 stop_queue; 331 u64 wake_queue; 332}; 333 334struct mana_context { 335 struct gdma_dev *gdma_dev; 336 337 u16 num_ports; 338 339 struct mana_eq *eqs; 340 341 struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; 342}; 343 344struct mana_port_context { 345 struct mana_context *ac; 346 struct net_device *ndev; 347 348 u8 mac_addr[ETH_ALEN]; 349 350 enum TRI_STATE rss_state; 351 352 mana_handle_t default_rxobj; 353 bool tx_shortform_allowed; 354 u16 tx_vp_offset; 355 356 struct mana_tx_qp *tx_qp; 357 358 /* Indirection Table for RX & TX. The values are queue indexes */ 359 u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; 360 361 /* Indirection table containing RxObject Handles */ 362 mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; 363 364 /* Hash key used by the NIC */ 365 u8 hashkey[MANA_HASH_KEY_SIZE]; 366 367 /* This points to an array of num_queues of RQ pointers. */ 368 struct mana_rxq **rxqs; 369 370 struct bpf_prog *bpf_prog; 371 372 /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */ 373 unsigned int max_queues; 374 unsigned int num_queues; 375 376 mana_handle_t port_handle; 377 378 u16 port_idx; 379 380 bool port_is_up; 381 bool port_st_save; /* Saved port state */ 382 383 struct mana_ethtool_stats eth_stats; 384}; 385 386int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); 387int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, 388 bool update_hash, bool update_tab); 389 390int mana_alloc_queues(struct net_device *ndev); 391int mana_attach(struct net_device *ndev); 392int mana_detach(struct net_device *ndev, bool from_close); 393 394int mana_probe(struct gdma_dev *gd, bool resuming); 395void mana_remove(struct gdma_dev *gd, bool suspending); 396 397void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); 398u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, 399 struct xdp_buff *xdp, void *buf_va, uint pkt_len); 400struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); 401void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); 402int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); 403 404extern const struct ethtool_ops mana_ethtool_ops; 405 406struct mana_obj_spec { 407 u32 queue_index; 408 u64 gdma_region; 409 u32 queue_size; 410 u32 attached_eq; 411 u32 modr_ctx_id; 412}; 413 414enum mana_command_code { 415 MANA_QUERY_DEV_CONFIG = 0x20001, 416 MANA_QUERY_GF_STAT = 0x20002, 417 MANA_CONFIG_VPORT_TX = 0x20003, 418 MANA_CREATE_WQ_OBJ = 0x20004, 419 MANA_DESTROY_WQ_OBJ = 0x20005, 420 MANA_FENCE_RQ = 0x20006, 421 MANA_CONFIG_VPORT_RX = 0x20007, 422 MANA_QUERY_VPORT_CONFIG = 0x20008, 423}; 424 425/* Query Device Configuration */ 426struct mana_query_device_cfg_req { 427 struct gdma_req_hdr hdr; 428 429 /* MANA Nic Driver Capability flags */ 430 u64 mn_drv_cap_flags1; 431 u64 mn_drv_cap_flags2; 432 u64 mn_drv_cap_flags3; 433 u64 mn_drv_cap_flags4; 434 435 u32 proto_major_ver; 436 u32 proto_minor_ver; 437 u32 proto_micro_ver; 438 439 u32 reserved; 440}; /* HW DATA */ 441 442struct mana_query_device_cfg_resp { 443 struct gdma_resp_hdr hdr; 444 445 u64 pf_cap_flags1; 446 u64 pf_cap_flags2; 447 u64 pf_cap_flags3; 448 u64 pf_cap_flags4; 449 450 u16 max_num_vports; 451 u16 reserved; 452 u32 max_num_eqs; 453}; /* HW DATA */ 454 455/* Query vPort Configuration */ 456struct mana_query_vport_cfg_req { 457 struct gdma_req_hdr hdr; 458 u32 vport_index; 459}; /* HW DATA */ 460 461struct mana_query_vport_cfg_resp { 462 struct gdma_resp_hdr hdr; 463 u32 max_num_sq; 464 u32 max_num_rq; 465 u32 num_indirection_ent; 466 u32 reserved1; 467 u8 mac_addr[6]; 468 u8 reserved2[2]; 469 mana_handle_t vport; 470}; /* HW DATA */ 471 472/* Configure vPort */ 473struct mana_config_vport_req { 474 struct gdma_req_hdr hdr; 475 mana_handle_t vport; 476 u32 pdid; 477 u32 doorbell_pageid; 478}; /* HW DATA */ 479 480struct mana_config_vport_resp { 481 struct gdma_resp_hdr hdr; 482 u16 tx_vport_offset; 483 u8 short_form_allowed; 484 u8 reserved; 485}; /* HW DATA */ 486 487/* Create WQ Object */ 488struct mana_create_wqobj_req { 489 struct gdma_req_hdr hdr; 490 mana_handle_t vport; 491 u32 wq_type; 492 u32 reserved; 493 u64 wq_gdma_region; 494 u64 cq_gdma_region; 495 u32 wq_size; 496 u32 cq_size; 497 u32 cq_moderation_ctx_id; 498 u32 cq_parent_qid; 499}; /* HW DATA */ 500 501struct mana_create_wqobj_resp { 502 struct gdma_resp_hdr hdr; 503 u32 wq_id; 504 u32 cq_id; 505 mana_handle_t wq_obj; 506}; /* HW DATA */ 507 508/* Destroy WQ Object */ 509struct mana_destroy_wqobj_req { 510 struct gdma_req_hdr hdr; 511 u32 wq_type; 512 u32 reserved; 513 mana_handle_t wq_obj_handle; 514}; /* HW DATA */ 515 516struct mana_destroy_wqobj_resp { 517 struct gdma_resp_hdr hdr; 518}; /* HW DATA */ 519 520/* Fence RQ */ 521struct mana_fence_rq_req { 522 struct gdma_req_hdr hdr; 523 mana_handle_t wq_obj_handle; 524}; /* HW DATA */ 525 526struct mana_fence_rq_resp { 527 struct gdma_resp_hdr hdr; 528}; /* HW DATA */ 529 530/* Configure vPort Rx Steering */ 531struct mana_cfg_rx_steer_req { 532 struct gdma_req_hdr hdr; 533 mana_handle_t vport; 534 u16 num_indir_entries; 535 u16 indir_tab_offset; 536 u32 rx_enable; 537 u32 rss_enable; 538 u8 update_default_rxobj; 539 u8 update_hashkey; 540 u8 update_indir_tab; 541 u8 reserved; 542 mana_handle_t default_rxobj; 543 u8 hashkey[MANA_HASH_KEY_SIZE]; 544}; /* HW DATA */ 545 546struct mana_cfg_rx_steer_resp { 547 struct gdma_resp_hdr hdr; 548}; /* HW DATA */ 549 550#define MANA_MAX_NUM_QUEUES 64 551 552#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) 553 554struct mana_tx_package { 555 struct gdma_wqe_request wqe_req; 556 struct gdma_sge sgl_array[5]; 557 struct gdma_sge *sgl_ptr; 558 559 struct mana_tx_oob tx_oob; 560 561 struct gdma_posted_wqe_info wqe_info; 562}; 563 564#endif /* _MANA_H */