ena_com.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_COM
#define ENA_COM

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/netdevice.h>

#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1

#define ENA_HASH_KEY_SIZE 40

#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF

#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1

struct ena_llq_configurations {
	enum ena_admin_llq_header_location llq_header_location;
	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
	enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
	u16 llq_ring_entry_size_value;
};

enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};

struct ena_com_buf {
	dma_addr_t paddr; /**< Buffer physical address */
	u16 len; /**< Buffer length in bytes */
};

struct ena_com_rx_buf_info {
	u16 len;
	u16 req_id;
};

struct ena_com_io_desc_addr {
	u8 __iomem *pbuf_dev_addr; /* LLQ address */
	u8 *virt_addr;
	dma_addr_t phys_addr;
};

struct ena_com_tx_meta {
	u16 mss;
	u16 l3_hdr_len;
	u16 l3_hdr_offset;
	u16 l4_hdr_len; /* In words */
};

struct ena_com_llq_info {
	u16 header_location_ctrl;
	u16 desc_stride_ctrl;
	u16 desc_list_entry_size_ctrl;
	u16 desc_list_entry_size;
	u16 descs_num_before_header;
	u16 descs_per_entry;
	u16 max_entries_in_tx_burst;
	bool disable_meta_caching;
};

struct ena_com_io_cq {
	struct ena_com_io_desc_addr cdesc_addr;

	/* Interrupt unmask register */
	u32 __iomem *unmask_reg;

	/* The completion queue head doorbell register */
	u32 __iomem *cq_head_db_reg;

	/* numa configuration register (for TPH) */
	u32 __iomem *numa_node_cfg_reg;

	/* The value to write to the above register to unmask
	 * the interrupt of this queue
	 */
	u32 msix_vector;

	enum queue_direction direction;

	/* holds the number of cdesc of the current packet */
	u16 cur_rx_pkt_cdesc_count;
	/* save the first cdesc idx of the current packet */
	u16 cur_rx_pkt_cdesc_start_idx;

	u16 q_depth;
	/* Caller qid */
	u16 qid;

	/* Device queue index */
	u16 idx;
	u16 head;
	u16 last_head_update;
	u8 phase;
	u8 cdesc_entry_size_in_bytes;

} ____cacheline_aligned;

struct ena_com_io_bounce_buffer_control {
	u8 *base_buffer;
	u16 next_to_use;
	u16 buffer_size;
	u16 buffers_num; /* Must be a power of 2 */
};

/* This struct keeps track of the current location of the next llq entry */
struct ena_com_llq_pkt_ctrl {
	u8 *curr_bounce_buf;
	u16 idx;
	u16 descs_left_in_line;
};

struct ena_com_io_sq {
	struct ena_com_io_desc_addr desc_addr;

	u32 __iomem *db_addr;
	u8 __iomem *header_addr;

	enum queue_direction direction;
	enum ena_admin_placement_policy_type mem_queue_type;

	bool disable_meta_caching;

	u32 msix_vector;
	struct ena_com_tx_meta cached_tx_meta;
	struct ena_com_llq_info llq_info;
	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;

	u16 q_depth;
	u16 qid;

	u16 idx;
	u16 tail;
	u16 next_to_comp;
	u16 llq_last_copy_tail;
	u32 tx_max_header_size;
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
	u16 entries_in_tx_burst_left;
} ____cacheline_aligned;

struct ena_com_admin_cq {
	struct ena_admin_acq_entry *entries;
	dma_addr_t dma_addr;

	u16 head;
	u8 phase;
};

struct ena_com_admin_sq {
	struct ena_admin_aq_entry *entries;
	dma_addr_t dma_addr;

	u32 __iomem *db_addr;

	u16 head;
	u16 tail;
	u8 phase;

};

struct ena_com_stats_admin {
	u64 aborted_cmd;
	u64 submitted_cmd;
	u64 completed_cmd;
	u64 out_of_space;
	u64 no_completion;
};

struct ena_com_admin_queue {
	void *q_dmadev;
	struct ena_com_dev *ena_dev;
	spinlock_t q_lock; /* spinlock for the admin queue */

	struct ena_comp_ctx *comp_ctx;
	u32 completion_timeout;
	u16 q_depth;
	struct ena_com_admin_cq cq;
	struct ena_com_admin_sq sq;

	/* Indicate if the admin queue should poll for completion */
	bool polling;

	/* Define if fallback to polling mode should occur */
	bool auto_polling;

	u16 curr_cmd_id;

	/* Indicate that the ENA device was initialized and can
	 * process new admin commands
	 */
	bool running_state;

	/* Count the number of outstanding admin commands */
	atomic_t outstanding_cmds;

	struct ena_com_stats_admin stats;
};

struct ena_aenq_handlers;

struct ena_com_aenq {
	u16 head;
	u8 phase;
	struct ena_admin_aenq_entry *entries;
	dma_addr_t dma_addr;
	u16 q_depth;
	struct ena_aenq_handlers *aenq_handlers;
};

struct ena_com_mmio_read {
	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
	dma_addr_t read_resp_dma_addr;
	u32 reg_read_to; /* in us */
	u16 seq_num;
	bool readless_supported;
	/* spin lock to ensure a single outstanding read */
	spinlock_t lock;
};

struct ena_rss {
	/* Indirect table */
	u16 *host_rss_ind_tbl;
	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_dma_addr;
	u16 tbl_log_size;

	/* Hash key */
	enum ena_admin_hash_functions hash_func;
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	dma_addr_t hash_key_dma_addr;
	u32 hash_init_val;

	/* Flow Control */
	struct ena_admin_feature_rss_hash_control *hash_ctrl;
	dma_addr_t hash_ctrl_dma_addr;

};

struct ena_host_attribute {
	/* Debug area */
	u8 *debug_area_virt_addr;
	dma_addr_t debug_area_dma_addr;
	u32 debug_area_size;

	/* Host information */
	struct ena_admin_host_info *host_info;
	dma_addr_t host_info_dma_addr;
};

/* Each ena_dev is a PCI function. */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
	struct ena_com_aenq aenq;
	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
	u8 __iomem *reg_bar;
	void __iomem *mem_bar;
	void *dmadev;
	struct net_device *net_device;

	enum ena_admin_placement_policy_type tx_mem_queue_type;
	u32 tx_max_header_size;
	u16 stats_func; /* Selected function for extended statistic dump */
	u16 stats_queue; /* Selected queue for extended statistic dump */

	struct ena_com_mmio_read mmio_read;

	struct ena_rss rss;
	u32 supported_features;
	u32 capabilities;
	u32 dma_addr_bits;

	struct ena_host_attribute host_attr;
	bool adaptive_coalescing;
	u16 intr_delay_resolution;

	/* interrupt moderation intervals are in usec divided by
	 * intr_delay_resolution, which is supplied by the device.
	 */
	u32 intr_moder_tx_interval;
	u32 intr_moder_rx_interval;

	struct ena_intr_moder_entry *intr_moder_tbl;

	struct ena_com_llq_info llq_info;

	u32 ena_min_poll_delay_us;
};

struct ena_com_dev_get_features_ctx {
	struct ena_admin_queue_feature_desc max_queues;
	struct ena_admin_queue_ext_feature_desc max_queue_ext;
	struct ena_admin_device_attr_feature_desc dev_attr;
	struct ena_admin_feature_aenq_desc aenq;
	struct ena_admin_feature_offload_desc offload;
	struct ena_admin_ena_hw_hints hw_hints;
	struct ena_admin_feature_llq_desc llq;
};

struct ena_com_create_io_ctx {
	enum ena_admin_placement_policy_type mem_queue_type;
	enum queue_direction direction;
	int numa_node;
	u32 msix_vector;
	u16 queue_size;
	u16 qid;
};

typedef void (*ena_aenq_handler)(void *data,
		struct ena_admin_aenq_entry *aenq_e);

/* Holds aenq handlers. Indexed by AENQ event group */
struct ena_aenq_handlers {
	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
	ena_aenq_handler unimplemented_handler;
};
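
/* Illustrative sketch (not part of this API): a driver would typically build
 * its AENQ handler table once at init time and pass it to ena_com_admin_init().
 * The handler and table names below are hypothetical; ENA_ADMIN_LINK_CHANGE is
 * assumed to be one of the enum ena_admin_aenq_group values from
 * ena_admin_defs.h.
 *
 *	static void example_link_change_handler(void *data,
 *			struct ena_admin_aenq_entry *aenq_e)
 *	{
 *	}
 *
 *	static void example_unimplemented_handler(void *data,
 *			struct ena_admin_aenq_entry *aenq_e)
 *	{
 *	}
 *
 *	static struct ena_aenq_handlers example_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = example_link_change_handler,
 *		},
 *		.unimplemented_handler = example_unimplemented_handler,
 *	};
 */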

/*****************************************************************************/
/*****************************************************************************/

/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 *
 * Initialize the register read mechanism.
 *
 * @note: This method must be the first stage in the initialization sequence.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);

/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 * @readless_supported: readless mode (enable/disable)
 */
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
		bool readless_supported);

/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
 * value physical address.
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);

/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
 * @ena_dev: ENA communication layer struct
 */
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);

/* ena_com_admin_init - Init the admin and the async queues
 * @ena_dev: ENA communication layer struct
 * @aenq_handlers: Handlers to be called upon AENQ events.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		struct ena_aenq_handlers *aenq_handlers);
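
/* Illustrative init-order sketch: per the notes above, the mmio register read
 * mechanism must be set up before the admin queue. Error handling and the
 * example_aenq_handlers table are driver specific placeholders.
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		goto err;
 *
 *	rc = ena_com_admin_init(ena_dev, &example_aenq_handlers);
 *	if (rc)
 *		goto err_mmio_read;
 */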

/* ena_com_admin_destroy - Destroy the admin and the async events queues.
 * @ena_dev: ENA communication layer struct
 *
 * @note: Before calling this method, the caller must validate that the device
 * won't send any additional admin completions/aenq.
 * To achieve that, a FLR is recommended.
 */
void ena_com_admin_destroy(struct ena_com_dev *ena_dev);

/* ena_com_dev_reset - Perform an FLR (function level reset) on the device.
 * @ena_dev: ENA communication layer struct
 * @reset_reason: The trigger for the reset, in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		enum ena_regs_reset_reason_types reset_reason);

/* ena_com_create_io_queue - Create io queue.
 * @ena_dev: ENA communication layer struct
 * @ctx - create context structure
 *
 * Create the submission and the completion queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
		struct ena_com_create_io_ctx *ctx);

/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
 * @ena_dev: ENA communication layer struct
 * @qid - the caller virtual queue id.
 */
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);

/* ena_com_get_io_handlers - Return the io queue handlers
 * @ena_dev: ENA communication layer struct
 * @qid - the caller virtual queue id.
 * @io_sq - IO submission queue handler
 * @io_cq - IO completion queue handler.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
		struct ena_com_io_sq **io_sq,
		struct ena_com_io_cq **io_cq);
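
/* Illustrative sketch of creating an IO queue pair and retrieving its
 * handlers. The queue size, qid, MSI-X vector and NUMA node values are
 * placeholders; ENA_ADMIN_PLACEMENT_POLICY_HOST is assumed to come from
 * ena_admin_defs.h.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.numa_node = 0,
 *		.msix_vector = 1,
 *		.queue_size = 1024,
 *		.qid = 0,
 *	};
 *	struct ena_com_io_sq *io_sq;
 *	struct ena_com_io_cq *io_cq;
 *	int rc;
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (!rc)
 *		rc = ena_com_get_io_handlers(ena_dev, ctx.qid, &io_sq, &io_cq);
 */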

/* ena_com_admin_aenq_enable - Enable asynchronous event notifications
 * @ena_dev: ENA communication layer struct
 *
 * After this method is called, AENQ events can be received.
 */
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);

/* ena_com_set_admin_running_state - Set the state of the admin queue
 * @ena_dev: ENA communication layer struct
 *
 * Change the state of the admin queue (enable/disable)
 */
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);

/* ena_com_get_admin_running_state - Get the admin queue state
 * @ena_dev: ENA communication layer struct
 *
 * Retrieve the state of the admin queue (enable/disable)
 *
 * @return - current admin queue running state (enabled/disabled)
 */
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);

/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @ena_dev: ENA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the admin completion mode.
 */
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);

/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
 * @ena_dev: ENA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the autopolling mode.
 * If autopolling is enabled, the driver switches to polling mode in case
 * an interrupt is missed while data is available.
 */
void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
		bool polling);

/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up all the pending
 * threads that wait on the commands wait event.
 *
 * @note: Should be called after MSI-X interrupt.
 */
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);

/* ena_com_aenq_intr_handler - AENQ interrupt handler
 * @ena_dev: ENA communication layer struct
 *
 * This method goes over the async event notification queue and calls the proper
 * aenq handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);

/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
 * @ena_dev: ENA communication layer struct
 *
 * This method aborts all the outstanding admin commands.
 * The caller should then call ena_com_wait_for_abort_completion to make sure
 * all the commands were completed.
 */
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);

/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
 * @ena_dev: ENA communication layer struct
 *
 * This method waits until all the outstanding admin commands are completed.
 */
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);

/* ena_com_validate_version - Validate the device parameters
 * @ena_dev: ENA communication layer struct
 *
 * This method verifies the device parameters are the same as the saved
 * parameters in ena_dev.
 * This method is useful after device reset, to validate that the device MAC
 * address and the device offloads are the same as before the reset.
 *
 * @return - 0 on success, negative value otherwise.
 */
int ena_com_validate_version(struct ena_com_dev *ena_dev);

/* ena_com_get_link_params - Retrieve physical link parameters.
 * @ena_dev: ENA communication layer struct
 * @resp: Link parameters
 *
 * Retrieve the physical link parameters,
 * like speed, auto-negotiation and full duplex support.
 *
 * @return - 0 on Success negative value otherwise.
 */
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
		struct ena_admin_get_feat_resp *resp);

/* ena_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @ena_dev: ENA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on Success and negative value otherwise.
 */
int ena_com_get_dma_width(struct ena_com_dev *ena_dev);

/* ena_com_set_aenq_config - Set aenq groups configurations
 * @ena_dev: ENA communication layer struct
 * @groups_flag: bit field flags of enum ena_admin_aenq_group.
 *
 * Configure which aenq event group the driver would like to receive.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);

/* ena_com_get_dev_attr_feat - Get device features
 * @ena_dev: ENA communication layer struct
 * @get_feat_ctx: returned context that contains the device features.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
		struct ena_com_dev_get_features_ctx *get_feat_ctx);

/* ena_com_get_dev_basic_stats - Get device basic statistics
 * @ena_dev: ENA communication layer struct
 * @stats: stats return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
		struct ena_admin_basic_stats *stats);

/* ena_com_get_eni_stats - Get extended network interface statistics
 * @ena_dev: ENA communication layer struct
 * @stats: stats return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
		struct ena_admin_eni_stats *stats);

/* ena_com_set_dev_mtu - Configure the device mtu.
 * @ena_dev: ENA communication layer struct
 * @mtu: mtu value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);

/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
 * @ena_dev: ENA communication layer struct
 * @offload: offload return value
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
		struct ena_admin_feature_offload_desc *offload);

/* ena_com_rss_init - Init RSS
 * @ena_dev: ENA communication layer struct
 * @log_size: indirection table log size
 *
 * Allocate RSS/RFS resources.
 * The caller then can configure rss using ena_com_set_hash_function,
 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);

/* ena_com_rss_destroy - Destroy rss
 * @ena_dev: ENA communication layer struct
 *
 * Free all the RSS/RFS resources.
 */
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);

/* ena_com_get_current_hash_function - Get RSS hash function
 * @ena_dev: ENA communication layer struct
 *
 * Return the current hash function.
 * @return: 0 or one of the ena_admin_hash_functions values.
 */
int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_fill_hash_function - Fill RSS hash function
 * @ena_dev: ENA communication layer struct
 * @func: The hash function (Toeplitz or crc)
 * @key: Hash key (for toeplitz hash)
 * @key_len: key length (max length 10 DW)
 * @init_val: initial value for the hash function
 *
 * Fill the ena_dev resources with the desired hash function, hash key, key_len
 * and key initial value (if needed by the hash function).
 * To flush the key into the device the caller should call
 * ena_com_set_hash_function.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
		enum ena_admin_hash_functions func,
		const u8 *key, u16 key_len, u32 init_val);

/* ena_com_set_hash_function - Flush the hash function and its dependencies to
 * the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash function and its dependencies (key, key length and
 * initial value) if needed.
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_function
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
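
/* Illustrative RSS hash configuration sketch: fill the hash function and key
 * in the ena_dev resources, then flush them to the device. ENA_ADMIN_TOEPLITZ
 * is assumed to be an enum ena_admin_hash_functions value from
 * ena_admin_defs.h; the initial value is a placeholder.
 *
 *	u8 hash_key[ENA_HASH_KEY_SIZE];
 *	int rc;
 *
 *	netdev_rss_key_fill(hash_key, ENA_HASH_KEY_SIZE);
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *			hash_key, ENA_HASH_KEY_SIZE, 0xffffffff);
 *	if (!rc)
 *		rc = ena_com_set_hash_function(ena_dev);
 */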

/* ena_com_get_hash_function - Retrieve the hash function from the device.
 * @ena_dev: ENA communication layer struct
 * @func: hash function
 *
 * Retrieve the hash function from the device.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
		enum ena_admin_hash_functions *func);

/* ena_com_get_hash_key - Retrieve the hash key
 * @ena_dev: ENA communication layer struct
 * @key: hash key
 *
 * Retrieve the hash key.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);

/* ena_com_fill_hash_ctrl - Fill RSS hash control
 * @ena_dev: ENA communication layer struct.
 * @proto: The protocol to configure.
 * @hash_fields: bit mask of ena_admin_flow_hash_fields
 *
 * Fill the ena_dev resources with the desired hash control (the ethernet
 * fields that take part in the hash) for a specific protocol.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
		enum ena_admin_flow_hash_proto proto,
		u16 hash_fields);

/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash control (the ethernet fields that take part in the hash)
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
 * @ena_dev: ENA communication layer struct
 * @proto: The protocol to retrieve.
 * @fields: bit mask of ena_admin_flow_hash_fields.
 *
 * Retrieve the hash control from the device.
 *
 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
		enum ena_admin_flow_hash_proto proto,
		u16 *fields);

/* ena_com_set_default_hash_ctrl - Set the hash control to a default
 * configuration.
 * @ena_dev: ENA communication layer struct
 *
 * Fill the ena_dev resources with the default hash control configuration.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
 * indirection table
 * @ena_dev: ENA communication layer struct.
 * @entry_idx - indirection table entry.
 * @entry_value - redirection value
 *
 * Fill a single entry of the RSS indirection table in the ena_dev resources.
 * To flush the indirection table to the device, the caller should call
 * ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
		u16 entry_idx, u16 entry_value);

/* ena_com_indirect_table_set - Flush the indirection table to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the indirection hash control to the device.
 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
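
/* Illustrative sketch of programming the RSS indirection table: fill each
 * entry in the ena_dev resources, then flush the table to the device. The
 * table size (1 << log_size) must match the size passed to ena_com_rss_init();
 * log_size and num_io_queues are hypothetical driver variables, and the simple
 * modulo mapping is a placeholder (the exact queue index encoding is driver
 * specific).
 *
 *	u16 i;
 *	int rc = 0;
 *
 *	for (i = 0; i < (1 << log_size) && !rc; i++)
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *				i % num_io_queues);
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 */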

/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
 * @ena_dev: ENA communication layer struct
 * @ind_tbl: indirection table
 *
 * Retrieve the RSS indirection table from the device.
 *
 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);

/* ena_com_allocate_host_info - Allocate host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);

/* ena_com_allocate_debug_area - Allocate debug area.
 * @ena_dev: ENA communication layer struct
 * @debug_area_size - debug area size.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
		u32 debug_area_size);

/* ena_com_delete_debug_area - Free the debug area resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated debug area.
 */
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);

/* ena_com_delete_host_info - Free the host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated host info.
 */
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);

/* ena_com_set_host_attributes - Update the device with the host
 * attributes (debug area and host info) base address.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);

/* ena_com_create_io_cq - Create io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Create IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
		struct ena_com_io_cq *io_cq);

/* ena_com_destroy_io_cq - Destroy io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Destroy IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
		struct ena_com_io_cq *io_cq);

/* ena_com_execute_admin_command - Execute admin command
 * @admin_queue: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @cmd_comp: command completion return value.
 * @cmd_comp_size: command completion size.
 *
 * Submit an admin command and then wait until the device returns a
 * completion.
 * The completion will be copied into cmd_comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
		struct ena_admin_aq_entry *cmd,
		size_t cmd_size,
		struct ena_admin_acq_entry *cmd_comp,
		size_t cmd_comp_size);

/* ena_com_init_interrupt_moderation - Init interrupt moderation
 * @ena_dev: ENA communication layer struct
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);

/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
 * capability is supported by the device.
 *
 * @return - supported or not.
 */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);

/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 * @tx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
		u32 tx_coalesce_usecs);

/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 * @rx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
		u32 rx_coalesce_usecs);

/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);

/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);

/* ena_com_config_dev_mode - Configure the placement policy of the device.
 * @ena_dev: ENA communication layer struct
 * @llq_features: LLQ feature descriptor, retrieve via
 * ena_com_get_dev_attr_feat.
 * @llq_default_config: The default driver LLQ parameter configuration
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
		struct ena_admin_feature_llq_desc *llq_features,
		struct ena_llq_configurations *llq_default_config);

/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
 * @io_sq: IO submit queue struct
 *
 * @return - ena_com_dev struct extracted from io_sq
 */
static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
{
	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
}

/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
 * @io_cq: IO completion queue struct
 *
 * @return - ena_com_dev struct extracted from io_cq
 */
static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
{
	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
}

static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}

static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}

static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}

/* ena_com_get_cap - query whether device supports a capability.
 * @ena_dev: ENA communication layer struct
 * @cap_id: enum value representing the capability
 *
 * @return - true if capability is supported or false otherwise
 */
static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
		enum ena_admin_aq_caps_id cap_id)
{
	return !!(ena_dev->capabilities & BIT(cap_id));
}
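
/* Illustrative usage sketch: a caller can gate optional functionality on a
 * device capability bit, e.g. (assuming ENA_ADMIN_ENI_STATS is one of the
 * enum ena_admin_aq_caps_id values in ena_admin_defs.h):
 *
 *	if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
 *		rc = ena_com_get_eni_stats(ena_dev, &eni_stats);
 */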

/* ena_com_update_intr_reg - Prepare interrupt register
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
 *
 * Prepare interrupt update register with the supplied parameters.
 */
static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
		u32 rx_delay_interval,
		u32 tx_delay_interval,
		bool unmask)
{
	intr_reg->intr_control = 0;
	intr_reg->intr_control |= rx_delay_interval &
		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;

	intr_reg->intr_control |=
		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;

	if (unmask)
		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
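
/* Illustrative sketch: prepare the interrupt register value and unmask the
 * queue's interrupt by writing it to the completion queue's unmask register
 * (see struct ena_com_io_cq above). The interval values are placeholders.
 *
 *	struct ena_eth_io_intr_reg intr_reg;
 *
 *	ena_com_update_intr_reg(&intr_reg, rx_interval, tx_interval, true);
 *	writel(intr_reg.intr_control, io_cq->unmask_reg);
 */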

static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
	u16 size, buffers_num;
	u8 *buf;

	size = bounce_buf_ctrl->buffer_size;
	buffers_num = bounce_buf_ctrl->buffers_num;

	/* buffers_num is a power of 2, so the mask wraps next_to_use around */
	buf = bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;

	/* Prefetch the following bounce buffer for the next allocation */
	prefetchw(bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);

	return buf;
}

#endif /* !(ENA_COM) */