From fe56b9e6a8d957d6a20729d626027f800c17a2da Mon Sep 17 00:00:00 2001
From: Yuval Mintz
Date: Mon, 26 Oct 2015 11:02:25 +0200
Subject: qed: Add module with basic common support

The QLogic Everest Driver is the backend module for the QL4xxx ethernet
products by QLogic. This module serves two main purposes:

1. It contains all the common code shared between the various drivers
   used with this line of products; flows such as chip initialization
   and de-initialization fall under this category.

2. It abstracts the protocol-specific HW & FW components, giving the
   protocol drivers a clean API whose slowpath configuration is
   detached from the actual HSI.

This patch adds a very basic module without any protocol-specific bits,
i.e., a basic implementation that falls almost entirely under the first
category.

Signed-off-by: Yuval Mintz
Signed-off-by: Ariel Elior
Signed-off-by: David S. Miller
---
 include/linux/qed/common_hsi.h | 607 +++++++++++++++++++++++++++++++++++++++++
 include/linux/qed/qed_chain.h  | 539 ++++++++++++++++++++++++++++++++++++
 include/linux/qed/qed_if.h     | 498 +++++++++++++++++++++++++++++++++
 3 files changed, 1644 insertions(+)
 create mode 100644 include/linux/qed/common_hsi.h
 create mode 100644 include/linux/qed/qed_chain.h
 create mode 100644 include/linux/qed/qed_if.h
(limited to 'include/linux')

diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
new file mode 100644
index 000000000000..6a4347639c03
--- /dev/null
+++ b/include/linux/qed/common_hsi.h
@@ -0,0 +1,607 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */ + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 4 +#define FW_REVISION_VERSION 2 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_K2 (192) +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_K2 (320) +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 (4) +#define NUM_OF_PHYS_TCS (8) + +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +#define LB_TC (NUM_OF_PHYS_TCS) + +/* Num of possible traffic priority values */ +#define NUM_OF_PRIO (8) + +#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) +#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) +#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) + +/* CIDs */ +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 + +/* XCM agg val selection */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection */ +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* XCM agg counter flag selection */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection */ +#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define 
DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF23) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* base number of Tx PQs in the CM PQ representation. + * should be used when storing PQ IDs in CM PQ registers and context + */ +#define CM_TX_PQ_BASE 0x200 + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB 12 + +#define CAU_HC_STOPPED_STATE 3 +#define CAU_HC_DISABLE_STATE 4 +#define CAU_HC_ENABLE_STATE 0 + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH \ + MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* PTT and GTT */ +#define PXP_NUM_PF_WINDOWS 12 +#define PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define 
PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 +#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +/* ILT Records */ +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +/******************/ +/* PBF CONSTANTS */ +/******************/ + +/* Number of PBF command queue lines. Each line is 32B. */ +#define PBF_MAX_CMD_LINES 3328 + +/* Number of BTB blocks. Each block is 256B. 
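+ * With BTB_MAX_BLOCKS below, that amounts to 1440 * 256B = 360KB of
+ * buffering in total.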
*/ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +/* Async data KCQ CQE */ +struct async_data { + __le32 cid; + __le16 itid; + u8 error_code; + u8 fw_debug_param; +}; + +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct async_data async_info; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* Multi function mode */ +enum mf_mode { + SF, + MF_OVLAN, + MF_NPAR, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_RESERVED1, + PROTOCOLID_RESERVED2, + PROTOCOLID_RESERVED3, + PROTOCOLID_CORE, + PROTOCOLID_ETH, + PROTOCOLID_RESERVED4, + PROTOCOLID_RESERVED5, + PROTOCOLID_PREROCE, + PROTOCOLID_COMMON, + PROTOCOLID_RESERVED6, + MAX_PROTOCOL_TYPE +}; + +/* status block structure */ +struct cau_pi_entry { + u32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* status block structure */ +struct cau_sb_entry { + u32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + u32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* core doorbell data */ +struct core_db_data { + u8 params; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP, + DB_AGG_CMD_SET, + DB_AGG_CMD_ADD, + DB_AGG_CMD_MAX, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM, + DB_DEST_UCM, + DB_DEST_TCM, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define 
DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + u32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + u32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + +struct parsing_and_err_flags { + __le16 flags; +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + +/* Concrete Function ID. 
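+ * Per the masks and shifts below, the 16-bit value is laid out as
+ * [3:0] pfid, [5:4] port, [6] path, [7] vf_valid and [15:8] vfid.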
*/ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register. */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window. */ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* status block structure */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +}; + +#endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h new file mode 100644 index 000000000000..b920c3605c46 --- /dev/null +++ b/include/linux/qed/qed_chain.h @@ -0,0 +1,539 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
+
+#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo)	HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo)	HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_DMA_REGPAIR(regpair)	(HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair)	(HILO_64(regpair.hi, regpair.lo))
+
+enum qed_chain_mode {
+	/* Each Page contains a next pointer at its end */
+	QED_CHAIN_MODE_NEXT_PTR,
+
+	/* Chain is a single page; a next ptr is unrequired */
+	QED_CHAIN_MODE_SINGLE,
+
+	/* Page pointers are located in a side list */
+	QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
+	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
+	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
+};
+
+struct qed_chain_next {
+	struct regpair	next_phys;
+	void		*next_virt;
+};
+
+struct qed_chain_pbl {
+	dma_addr_t	p_phys_table;
+	void		*p_virt_table;
+	u16		prod_page_idx;
+	u16		cons_page_idx;
+};
+
+struct qed_chain {
+	void		*p_virt_addr;
+	dma_addr_t	p_phys_addr;
+	void		*p_prod_elem;
+	void		*p_cons_elem;
+	u16		page_cnt;
+	enum qed_chain_mode	mode;
+	enum qed_chain_use_mode	intended_use; /* used to produce/consume */
+	u16		capacity; /*< number of _usable_ elements */
+	u16		size; /* number of elements */
+	u16		prod_idx;
+	u16		cons_idx;
+	u16		elem_per_page;
+	u16		elem_per_page_mask;
+	u16		elem_unusable;
+	u16		usable_per_page;
+	u16		elem_size;
+	u16		next_page_mask;
+	struct qed_chain_pbl	pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
+#define QED_CHAIN_PAGE_SIZE		(0x1000)
+#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	\
+	((mode == QED_CHAIN_MODE_NEXT_PTR) ?		\
+	 (1 + ((sizeof(struct qed_chain_next) - 1) /	\
+	       (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+	((u32)(ELEMS_PER_PAGE(elem_size) - \
+	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+	return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+	return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+	u16 used;
+
+	/* we don't need to truncate upon assignment, as we assign u32->u16 */
+	used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+		(u32)p_chain->cons_idx;
+	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+		used -= (used / p_chain->elem_per_page);
+
+	return p_chain->capacity - used;
+}
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+	return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+	return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+	return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+	return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+		       void **p_next_elem,
+		       u16 *idx_to_inc,
+		       u16 *page_to_inc)
+
+{
+	switch (p_chain->mode) {
+	case QED_CHAIN_MODE_NEXT_PTR:
+	{
+		struct qed_chain_next *p_next = *p_next_elem;
+		*p_next_elem = p_next->next_virt;
+		*idx_to_inc += p_chain->elem_unusable;
+		break;
+	}
+	case QED_CHAIN_MODE_SINGLE:
+		*p_next_elem = p_chain->p_virt_addr;
+		break;
+
+	case QED_CHAIN_MODE_PBL:
+		/* It is assumed pages are sequential; the next element only
+		 * needs to change when wrapping from the last page back to
+		 * the first.
+		 */
+		if (++(*page_to_inc) == p_chain->page_cnt) {
+			*page_to_inc = 0;
+			*p_next_elem = p_chain->p_virt_addr;
+		}
+	}
+}
+
+#define is_unusable_idx(p, idx)	\
+	(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+	((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_ans_skip(p, idx)				\
+	do {						\
+		if (is_unusable_idx(p, idx)) {		\
+			(p)->idx += (p)->elem_unusable;	\
+		}					\
+	} while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
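+ * For example, once the FW reports that `num' previously produced
+ * elements have been processed, a driver could advance past all of them
+ * at once with qed_chain_return_multi_produced(p_chain, num)
+ * (illustrative usage; the actual completion source is
+ * protocol-specific).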
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+				u16 num)
+{
+	p_chain->cons_idx += num;
+	test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+	p_chain->cons_idx++;
+	test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the
+ * driver's responsibility to validate that the chain has room for a new
+ * element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+	void *ret = NULL;
+
+	if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+	    p_chain->next_page_mask) {
+		qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+				       &p_chain->prod_idx,
+				       &p_chain->pbl.prod_page_idx);
+	}
+
+	ret = p_chain->p_prod_elem;
+	p_chain->prod_idx++;
+	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+					p_chain->elem_size);
+
+	return ret;
+}
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ * @return u16, the maximum number of usable BDs the chain can hold
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+	return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+	test_ans_skip(p_chain, prod_idx);
+	p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
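+ * For example, given a completion producer index `hw_prod' reported by
+ * the FW (e.g., read from a status block), a driver could drain the new
+ * entries with (hypothetical sketch):
+ *
+ *	while (qed_chain_get_cons_idx(p_chain) != hw_prod)
+ *		process(qed_chain_consume(p_chain));
+ *
+ * where `hw_prod' and process() are placeholders for whatever completion
+ * index and handling the protocol driver actually uses.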
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+	void *ret = NULL;
+
+	if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+	    p_chain->next_page_mask) {
+		qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+				       &p_chain->cons_idx,
+				       &p_chain->pbl.cons_page_idx);
+	}
+
+	ret = p_chain->p_cons_elem;
+	p_chain->cons_idx++;
+	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+					p_chain->elem_size);
+
+	return ret;
+}
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain	pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+	int i;
+
+	p_chain->prod_idx = 0;
+	p_chain->cons_idx = 0;
+	p_chain->p_cons_elem = p_chain->p_virt_addr;
+	p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+		p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
+		p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+	}
+
+	switch (p_chain->intended_use) {
+	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+	case QED_CHAIN_USE_TO_PRODUCE:
+		/* Do nothing */
+		break;
+
+	case QED_CHAIN_USE_TO_CONSUME:
+		/* produce empty elements */
+		for (i = 0; i < p_chain->capacity; i++)
+			qed_chain_recycle_consumed(p_chain);
+		break;
+	}
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr
+ * @param p_phys_addr	physical address of allocated buffer's beginning
+ * @param page_cnt	number of pages in the allocated buffer
+ * @param elem_size	size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+				  void *p_virt_addr,
+				  dma_addr_t p_phys_addr,
+				  u16 page_cnt,
+				  u8 elem_size,
+				  enum qed_chain_use_mode intended_use,
+				  enum qed_chain_mode mode)
+{
+	/* chain fixed parameters */
+	p_chain->p_virt_addr = p_virt_addr;
+	p_chain->p_phys_addr = p_phys_addr;
+	p_chain->elem_size = elem_size;
+	p_chain->page_cnt = page_cnt;
+	p_chain->mode = mode;
+
+	p_chain->intended_use = intended_use;
+	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+	p_chain->usable_per_page =
+		USABLE_ELEMS_PER_PAGE(elem_size, mode);
+	p_chain->capacity = p_chain->usable_per_page * page_cnt;
+	p_chain->size = p_chain->elem_per_page * page_cnt;
+	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+
+	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+	p_chain->next_page_mask = (p_chain->usable_per_page &
+				   p_chain->elem_per_page_mask);
+
+	if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+		struct qed_chain_next *p_next;
+		u16 i;
+
+		for (i = 0; i < page_cnt - 1; i++) {
+			/* Increment mem_phy to the next page. */
+			p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+			/* Initialize the physical address of the next page. */
+			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+							   elem_size *
+							   p_chain->
+							   usable_per_page);
+
+			p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
+			p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
+
+			/* Initialize the virtual address of the next page. */
+			p_next->next_virt = (void *)((u8 *)p_virt_addr +
+						     QED_CHAIN_PAGE_SIZE);
+
+			/* Move to the next page.
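+			 * (p_next->next_virt was just set to the start of
+			 * the following page, so this walks the loop forward
+			 * one page at a time.)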
+			 */
+			p_virt_addr = p_next->next_virt;
+		}
+
+		/* Last page's next should point to beginning of the chain */
+		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+						   elem_size *
+						   p_chain->usable_per_page);
+
+		p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
+		p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
+		p_next->next_virt = p_chain->p_virt_addr;
+	}
+	qed_chain_reset(p_chain);
+}
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic pbl chain
+ *        struct
+ * @param p_chain
+ * @param p_virt_addr	virtual address of allocated buffer's beginning
+ * @param p_phys_addr	physical address of allocated buffer's beginning
+ * @param page_cnt	number of pages in the allocated buffer
+ * @param elem_size	size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl	pointer to a pre-allocated side table
+ *			which will hold physical page addresses.
+ * @param p_virt_pbl	pointer to a pre-allocated side table
+ *			which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+		   void *p_virt_addr,
+		   dma_addr_t p_phys_addr,
+		   u16 page_cnt,
+		   u8 elem_size,
+		   enum qed_chain_use_mode use_mode,
+		   dma_addr_t p_phys_pbl,
+		   dma_addr_t *p_virt_pbl)
+{
+	dma_addr_t *p_pbl_dma = p_virt_pbl;
+	int i;
+
+	qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+		       elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+	p_chain->pbl.p_phys_table = p_phys_pbl;
+	p_chain->pbl.p_virt_table = p_virt_pbl;
+
+	/* Fill the PBL with physical addresses */
+	for (i = 0; i < page_cnt; i++) {
+		*p_pbl_dma = p_phys_addr;
+		p_phys_addr += QED_CHAIN_PAGE_SIZE;
+		p_pbl_dma++;
+	}
+}
+
+/**
+ * @brief qed_chain_set_prod - sets the prod to the given
+ *        value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+				      u16 prod_idx,
+				      void *p_prod_elem)
+{
+	p_chain->prod_idx = prod_idx;
+	p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * get a pointer to an element represented by absolute idx
+ *
+ * @param p_chain
+ * @param idx
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the requested element
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+					   u16 idx)
+{
+	void *ret = NULL;
+
+	if (idx >= p_chain->size)
+		return NULL;
+
+	ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+	return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For SGE chains, the producer isn't increased serially; the ring
+ * is expected to be full at all times. Once elements are
+ * consumed, they are immediately produced.
+ *
+ * @param p_chain
+ * @param cnt
+ *
+ * @return inline void
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+			    u16 cnt)
+{
+	p_chain->prod_idx += cnt;
+	p_chain->cons_idx += cnt;
+}
+
+#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
new file mode 100644
index 000000000000..dc9a1353f971
--- /dev/null
+++ b/include/linux/qed/qed_if.h
@@ -0,0 +1,498 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */ + +#ifndef _QED_IF_H +#define _QED_IF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ + (void __iomem *)(reg_addr)) + +#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) + +#define QED_COALESCE_MAX 0xFF + +/* forward */ +struct qed_dev; + +struct qed_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; +}; + +struct qed_pf_params { + struct qed_eth_pf_params eth_pf_params; +}; + +enum qed_int_mode { + QED_INT_MODE_INTA, + QED_INT_MODE_MSIX, + QED_INT_MODE_MSI, + QED_INT_MODE_POLL, +}; + +struct qed_sb_info { + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 + + struct qed_dev *cdev; +}; + +struct qed_dev_info { + unsigned long pci_mem_start; + unsigned long pci_mem_end; + unsigned int pci_irq; + u8 num_hwfns; + + u8 hw_mac[ETH_ALEN]; + bool is_mf; + + /* FW version */ + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + /* MFW version */ + u32 mfw_rev; + + u32 flash_size; + u8 mf_mode; +}; + +enum qed_sb_type { + QED_SB_TYPE_L2_QUEUE, +}; + +enum qed_protocol { + QED_PROTOCOL_ETH, +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) + u32 override_flags; + bool autoneg; + u32 adv_speeds; + u32 forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + u32 pause_config; +}; + +struct qed_link_output { + bool link_up; + + u32 supported_caps; /* In SUPPORTED defs */ + u32 advertised_caps; /* In ADVERTISED defs */ + u32 lp_caps; /* In ADVERTISED defs */ + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; +}; + +#define QED_DRV_VER_STR_SIZE 12 +struct qed_slowpath_params { + u32 int_mode; + u8 drv_major; + u8 drv_minor; + u8 drv_rev; + u8 drv_eng; + u8 name[QED_DRV_VER_STR_SIZE]; +}; + +#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ + +struct qed_int_info { + struct msix_entry *msix; + u8 msix_cnt; + + /* This should be updated by the protocol driver */ + u8 used_cnt; +}; + +struct qed_common_cb_ops { + void (*link_update)(void *dev, + struct qed_link_output *link); +}; + +struct qed_common_ops { + struct qed_dev* (*probe)(struct pci_dev *dev, + enum qed_protocol protocol, + u32 dp_module, u8 dp_level); + + void (*remove)(struct qed_dev *cdev); + + int (*set_power_state)(struct qed_dev *cdev, + pci_power_t state); + + void (*set_id)(struct qed_dev *cdev, + char name[], + char ver_str[]); + + /* Client drivers need to make this call before slowpath_start. + * PF params required for the call before slowpath_start is + * documented within the qed_pf_params structure definition. + */ + void (*update_pf_params)(struct qed_dev *cdev, + struct qed_pf_params *params); + int (*slowpath_start)(struct qed_dev *cdev, + struct qed_slowpath_params *params); + + int (*slowpath_stop)(struct qed_dev *cdev); + + /* Requests to use `cnt' interrupts for fastpath. 
+ * upon success, returns number of interrupts allocated for fastpath. + */ + int (*set_fp_int)(struct qed_dev *cdev, + u16 cnt); + + /* Fills `info' with pointers required for utilizing interrupts */ + int (*get_fp_int)(struct qed_dev *cdev, + struct qed_int_info *info); + + u32 (*sb_init)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id, + enum qed_sb_type type); + + u32 (*sb_release)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id); + + void (*simd_handler_config)(struct qed_dev *cdev, + void *token, + int index, + void (*handler)(void *)); + + void (*simd_handler_clean)(struct qed_dev *cdev, + int index); +/** + * @brief set_link - set links according to params + * + * @param cdev + * @param params - values used to override the default link configuration + * + * @return 0 on success, error otherwise. + */ + int (*set_link)(struct qed_dev *cdev, + struct qed_link_params *params); + +/** + * @brief get_link - returns the current link state. + * + * @param cdev + * @param if_link - structure to be filled with current link configuration. + */ + void (*get_link)(struct qed_dev *cdev, + struct qed_link_output *if_link); + +/** + * @brief - drains chip in case Tx completions fail to arrive due to pause. + * + * @param cdev + */ + int (*drain)(struct qed_dev *cdev); + +/** + * @brief update_msglvl - update module debug level + * + * @param cdev + * @param dp_module + * @param dp_level + */ + void (*update_msglvl)(struct qed_dev *cdev, + u32 dp_module, + u8 dp_level); + + int (*chain_alloc)(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + u16 num_elems, + size_t elem_size, + struct qed_chain *p_chain); + + void (*chain_free)(struct qed_dev *cdev, + struct qed_chain *p_chain); +}; + +/** + * @brief qed_get_protocol_version + * + * @param protocol + * + * @return version supported by qed for given protocol driver + */ +u32 qed_get_protocol_version(enum qed_protocol protocol); + +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name ## _MASK)) + +#define FIELD_VALUE(_name, _value) \ + ((_value & _name ## _MASK) << _name ## _SHIFT) + +#define SET_FIELD(value, name, flag) \ + do { \ + (value) &= ~(name ## _MASK << name ## _SHIFT); \ + (value) |= (((u64)flag) << (name ## _SHIFT)); \ + } while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name ## _SHIFT)) & name ## _MASK) + +/* Debug print definitions */ +#define DP_ERR(cdev, fmt, ...) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__) \ + +#define DP_NOTICE(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + \ + } \ + } while (0) + +#define DP_INFO(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define DP_VERBOSE(cdev, module, fmt, ...) \ + do { \ + if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \ + ((cdev)->dp_module & module))) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? 
DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +enum DP_LEVEL { + QED_LEVEL_VERBOSE = 0x0, + QED_LEVEL_INFO = 0x1, + QED_LEVEL_NOTICE = 0x2, + QED_LEVEL_ERR = 0x3, +}; + +#define QED_LOG_LEVEL_SHIFT (30) +#define QED_LOG_VERBOSE_MASK (0x3fffffff) +#define QED_LOG_INFO_MASK (0x40000000) +#define QED_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { + QED_MSG_SPQ = 0x10000, + QED_MSG_STATS = 0x20000, + QED_MSG_DCB = 0x40000, + QED_MSG_IOV = 0x80000, + QED_MSG_SP = 0x100000, + QED_MSG_STORAGE = 0x200000, + QED_MSG_CXT = 0x800000, + QED_MSG_ILT = 0x2000000, + QED_MSG_ROCE = 0x4000000, + QED_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; + +struct qed_eth_stats { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_127_byte_packets; + u64 rx_255_byte_packets; + u64 rx_511_byte_packets; + u64 rx_1023_byte_packets; + u64 rx_1518_byte_packets; + u64 rx_1522_byte_packets; + u64 rx_2047_byte_packets; + u64 rx_4095_byte_packets; + u64 rx_9216_byte_packets; + u64 rx_16383_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; +}; + +#define QED_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + prod = le32_to_cpu(sb_info->sb_virt->prod_index) & + STATUS_BLOCK_PROD_INDEX_MASK; + if (sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= QED_SB_IDX; + } + + /* Let SB update */ + mmiowb(); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using qed_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. 
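+ *	E.g., a NAPI-style poll routine would typically re-enable
+ *	interrupts with qed_sb_ack(sb_info, IGU_INT_ENABLE, 1), while an
+ *	ISR that defers work might first mask them with
+ *	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0). (Illustrative usage; the
+ *	actual call sites live in the protocol driver.)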
+ * + * @return inline void + */ +static inline void qed_sb_ack(struct qed_sb_info *sb_info, + enum igu_int_cmd int_cmd, + u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. + */ + mmiowb(); + barrier(); +} + +static inline void __internal_ram_wr(void *p_hwfn, + void __iomem *addr, + int size, + u32 *data) + +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); +} + +static inline void internal_ram_wr(void __iomem *addr, + int size, + u32 *data) +{ + __internal_ram_wr(NULL, addr, size, data); +} + +#endif -- cgit v1.2.3-71-gd317 From 25c089d78f3833edf614fc377e75e9cf848562f5 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 26 Oct 2015 11:02:26 +0200 Subject: qed: Add basic L2 interface This patch adds a public API for a network driver to work on top of QED. The interface itself is very minimal - it's mostly infrastructure, as the only content it has after this patch is a query for HW-based information required for the creation of a network interface [I.e., no actual protocol-specific configurations are supported]. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed.h | 14 ++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 62 +++++++ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 + drivers/net/ethernet/qlogic/qed/qed_l2.c | 87 ++++++++++ include/linux/qed/eth_common.h | 279 ++++++++++++++++++++++++++++++ include/linux/qed/qed_eth_if.h | 38 ++++ 7 files changed, 482 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_l2.c create mode 100644 include/linux/qed/eth_common.h create mode 100644 include/linux/qed/qed_eth_if.h (limited to 'include/linux') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 6969b5c66929..5c2fd57236fe 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ - qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o + qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a63ef3120d78..e03371d3e622 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -25,6 +25,7 @@ #include #include "qed_hsi.h" +extern const struct qed_common_ops qed_common_ops_pass; #define DRV_MODULE_VERSION "8.4.0.0" #define MAX_HWFNS_PER_DEVICE (4) @@ -91,13 +92,22 @@ struct qed_qm_iids { enum QED_RESOURCES { QED_SB, + QED_L2_QUEUE, QED_VPORT, + QED_RSS_ENG, QED_PQ, QED_RL, + QED_MAC, + QED_VLAN, QED_ILT, QED_MAX_RESC, }; +enum QED_FEATURE { + QED_PF_L2_QUE, + QED_MAX_FEATURES, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -105,6 +115,7 @@ struct qed_hw_info { /* 
Resource Allocation scheme results */ u32 resc_start[QED_MAX_RESC]; u32 resc_num[QED_MAX_RESC]; + u32 feat_num[QED_MAX_FEATURES]; #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) @@ -266,6 +277,9 @@ struct qed_hwfn { struct qed_mcp_info *mcp_info; + struct qed_hw_cid_data *p_tx_cids; + struct qed_hw_cid_data *p_rx_cids; + struct qed_dmae_info dmae_info; /* QM init */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 5b845220ae8c..3243cb4160c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -89,6 +89,15 @@ void qed_resc_free(struct qed_dev *cdev) kfree(cdev->reset_stats); + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + kfree(p_hwfn->p_tx_cids); + p_hwfn->p_tx_cids = NULL; + kfree(p_hwfn->p_rx_cids); + p_hwfn->p_rx_cids = NULL; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -202,6 +211,29 @@ int qed_resc_alloc(struct qed_dev *cdev) if (!cdev->fw_data) return -ENOMEM; + /* Allocate Memory for the Queue->CID mapping */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + int tx_size = sizeof(struct qed_hw_cid_data) * + RESC_NUM(p_hwfn, QED_L2_QUEUE); + int rx_size = sizeof(struct qed_hw_cid_data) * + RESC_NUM(p_hwfn, QED_L2_QUEUE); + + p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL); + if (!p_hwfn->p_tx_cids) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for Tx Cids\n"); + goto alloc_err; + } + + p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); + if (!p_hwfn->p_rx_cids) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for Rx Cids\n"); + goto alloc_err; + } + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -881,6 +913,20 @@ static void get_function_id(struct qed_hwfn *p_hwfn) PXP_CONCRETE_FID_PORT); } +static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) +{ + u32 *feat_num = p_hwfn->hw_info.feat_num; + int num_features = 1; + + feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / + num_features, + RESC_NUM(p_hwfn, QED_L2_QUEUE)); + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n", + feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB), + num_features); +} + static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; @@ -893,29 +939,45 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) resc_num[QED_SB] = min_t(u32, (MAX_SB_PER_PATH_BB / num_funcs), qed_int_get_num_sbs(p_hwfn, NULL)); + resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; + resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs; resc_num[QED_RL] = 8; + resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; + resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / + num_funcs; resc_num[QED_ILT] = 950; for (i = 0; i < QED_MAX_RESC; i++) resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; + qed_hw_set_feat(p_hwfn); + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "The numbers for each resource are:\n" "SB = %d start = %d\n" + "L2_QUEUE = %d start = %d\n" "VPORT = %d start = %d\n" "PQ = %d start = %d\n" "RL = %d start = %d\n" + "MAC = %d start = %d\n" + "VLAN = %d start = %d\n" "ILT = %d start = %d\n", p_hwfn->hw_info.resc_num[QED_SB], p_hwfn->hw_info.resc_start[QED_SB], + p_hwfn->hw_info.resc_num[QED_L2_QUEUE], + 
p_hwfn->hw_info.resc_start[QED_L2_QUEUE], p_hwfn->hw_info.resc_num[QED_VPORT], p_hwfn->hw_info.resc_start[QED_VPORT], p_hwfn->hw_info.resc_num[QED_PQ], p_hwfn->hw_info.resc_start[QED_PQ], p_hwfn->hw_info.resc_num[QED_RL], p_hwfn->hw_info.resc_start[QED_RL], + p_hwfn->hw_info.resc_num[QED_MAC], + p_hwfn->hw_info.resc_start[QED_MAC], + p_hwfn->hw_info.resc_num[QED_VLAN], + p_hwfn->hw_info.resc_start[QED_VLAN], p_hwfn->hw_info.resc_num[QED_ILT], p_hwfn->hw_info.resc_start[QED_ILT]); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 61c15a57c267..27f2c005e2b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -17,6 +17,7 @@ #include #include #include +#include struct qed_hwfn; struct qed_ptt; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c new file mode 100644 index 000000000000..f2e76024409a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -0,0 +1,87 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +static int qed_fill_eth_dev_info(struct qed_dev *cdev, + struct qed_dev_eth_info *info) +{ + int i; + + memset(info, 0, sizeof(*info)); + + info->num_tc = 1; + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) + info->num_queues += FEAT_NUM(&cdev->hwfns[i], + QED_PF_L2_QUE); + if (cdev->int_params.fp_msix_cnt) + info->num_queues = min_t(u8, info->num_queues, + cdev->int_params.fp_msix_cnt); + } else { + info->num_queues = cdev->num_hwfns; + } + + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + ether_addr_copy(info->port_mac, + cdev->hwfns[0].hw_info.hw_mac_addr); + + qed_fill_dev_info(cdev, &info->common); + + return 0; +} + +static const struct qed_eth_ops qed_eth_ops_pass = { + .common = &qed_common_ops_pass, + .fill_dev_info = &qed_fill_eth_dev_info, +}; + +const struct qed_eth_ops *qed_get_eth_ops(u32 version) +{ + if (version != QED_ETH_INTERFACE_VERSION) { + pr_notice("Cannot supply ethtool operations [%08x != %08x]\n", + version, QED_ETH_INTERFACE_VERSION); + return NULL; + } + + return &qed_eth_ops_pass; +} +EXPORT_SYMBOL(qed_get_eth_ops); + +void qed_put_eth_ops(void) +{ + /* TODO - reference count for module? */ +} +EXPORT_SYMBOL(qed_put_eth_ops); diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h new file mode 100644 index 000000000000..320b3373ac1d --- /dev/null +++ b/include/linux/qed/eth_common.h @@ -0,0 +1,279 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE		64
+
+#define ETH_MAX_RAMROD_PER_CON		8
+#define ETH_TX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_SGE_PAGE_SIZE_BYTES	4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES	4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS	2
+#define ETH_RX_NUM_NEXT_PAGE_SGES	2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT			1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET		18
+#define ETH_TX_MAX_LSO_HDR_NBD				4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT			3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT	3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT		2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE		2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN			(9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES			510
+
+#define ETH_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
+
+#define ETH_REG_CQE_PBL_SIZE		3
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS		512
+#define ETH_NUM_VLAN_FILTERS		512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED	0
+#define ETH_MULTICAST_MAC_BINS		256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS	(ETH_MULTICAST_MAC_BINS / 32)
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT		10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM	128
+#define ETH_RSS_KEY_SIZE_REGS		10
+#define ETH_RSS_ENGINE_NUM_K2		207
+#define ETH_RSS_ENGINE_NUM_BB		127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM		64
+#define ETH_TPA_CQE_START_SGL_SIZE	3
+#define ETH_TPA_CQE_CONT_SGL_SIZE	6
+#define ETH_TPA_CQE_END_SGL_SIZE	4
+
+/* Queue Zone sizes */
+#define TSTORM_QZONE_SIZE	0
+#define MSTORM_QZONE_SIZE	sizeof(struct mstorm_eth_queue_zone)
+#define USTORM_QZONE_SIZE	sizeof(struct ustorm_eth_queue_zone)
+#define XSTORM_QZONE_SIZE	0
+#define YSTORM_QZONE_SIZE	sizeof(struct ystorm_eth_queue_zone)
+#define PSTORM_QZONE_SIZE	0
+
+/* Interrupt coalescing TimeSet */
+struct coalescing_timeset {
+	u8	timeset;
+	u8	valid;
+};
+
+struct eth_tx_1st_bd_flags {
+	u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	0
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT		1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT		2
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT	3
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK			0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT			4
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT		5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT		6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT		7
+};
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+	__le16				vlan;
+	u8				nbds;
+	struct eth_tx_1st_bd_flags	bd_flags;
+	__le16				fw_use_only;
+};
+
+/* The parsing information data for the second tx bd of a given packet.
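+ * Mostly tunnelling and L4-offload hints (header offsets, tunnel type,
+ * TCP vs. UDP), per the bit-field definitions that follow.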
*/ +struct eth_tx_data_2nd_bd { + __le16 tunn_ip_size; + __le16 bitfields; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +}; + +/* Regular ETH Rx FP CQE. */ +struct eth_fast_path_rx_reg_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_bd; + u8 placement_offset; + u8 reserved; + __le16 pbl[ETH_REG_CQE_PBL_SIZE]; + u8 reserved1[10]; +}; + +/* The L4 pseudo checksum mode for Ethernet */ +enum eth_l4_pseudo_checksum_mode { + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + +struct eth_rx_bd { + struct regpair addr; +}; + +/* regular ETH Rx SP CQE */ +struct eth_slow_path_rx_cqe { + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[27]; + __le16 echo; +}; + +/* union for all ETH Rx CQE types */ +union eth_rx_cqe { + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_slow_path_rx_cqe slow_path; +}; + +/* ETH Rx CQE type */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR, + ETH_RX_CQE_TYPE_SLOW_PATH, + MAX_ETH_RX_CQE_TYPE +}; + +/* ETH Rx producers data */ +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 sge_prod; + __le16 cqe_prod; + __le16 reserved; +}; + +/* The first tx bd of a given packet */ +struct eth_tx_1st_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; +}; + +/* The second tx bd of a given packet */ +struct eth_tx_2nd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; +}; + +/* The parsing information data for the third tx bd of a given packet. 
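+ *
+ * Continuing the LSO sketch above ("mss" and "hdr_nbds" are placeholder
+ * values computed by the caller), the third BD carries the segmentation
+ * geometry:
+ *
+ *	struct eth_tx_data_3rd_bd *d = &third_bd->data;
+ *
+ *	d->lso_mss = cpu_to_le16(mss);
+ *	SET_FIELD(d->bitfields, ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW,
+ *		  tcp_hdrlen(skb) >> 2);
+ *	SET_FIELD(d->bitfields, ETH_TX_DATA_3RD_BD_HDR_NBD, hdr_nbds);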
*/ +struct eth_tx_data_3rd_bd { + __le16 lso_mss; + u8 bitfields; +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 + u8 reserved0[3]; +}; + +/* The third tx bd of a given packet */ +struct eth_tx_3rd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; +}; + +/* The common non-special TX BD ring element */ +struct eth_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 reserved0; + __le32 reserved1; +}; + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd; + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd; + struct eth_tx_bd reg_bd; +}; + +/* Mstorm Queue Zone */ +struct mstorm_eth_queue_zone { + struct eth_rx_prod_data rx_producers; + __le32 reserved[2]; +}; + +/* Ustorm Queue Zone */ +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* Ystorm Queue Zone */ +struct ystorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* ETH doorbell data */ +struct eth_db_data { + u8 params; +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +#endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h new file mode 100644 index 000000000000..fbd8700f0b31 --- /dev/null +++ b/include/linux/qed/qed_eth_if.h @@ -0,0 +1,38 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_ETH_IF_H +#define _QED_ETH_IF_H + +#include +#include +#include +#include + +struct qed_dev_eth_info { + struct qed_dev_info common; + + u8 num_queues; + u8 num_tc; + + u8 port_mac[ETH_ALEN]; + u8 num_vlan_filters; +}; + +struct qed_eth_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_eth_info *info); + +}; + +const struct qed_eth_ops *qed_get_eth_ops(u32 version); +void qed_put_eth_ops(void); + +#endif -- From cee4d26448c1000ccc1711eb5e6ed4c15f18fa83 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Mon, 26 Oct 2015 11:02:28 +0200 Subject: qed: Add slowpath L2 support This patch adds support to qed for configuring various L2 elements, such as channels and basic filtering conditions. It also enhances qed's public API to allow qede to later utilize this functionality. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 114 ++ drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 58 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 294 +++++ drivers/net/ethernet/qlogic/qed/qed_l2.c | 1605 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 10 + drivers/net/ethernet/qlogic/qed/qed_mcp.c | 16 + drivers/net/ethernet/qlogic/qed/qed_mcp.h | 13 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 27 + drivers/net/ethernet/qlogic/qed/qed_spq.c | 29 + include/linux/qed/qed_eth_if.h | 120 ++ 10 files changed, 2286 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3243cb4160c3..3d1bdbf9ade1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -799,6 +799,60 @@ int qed_hw_stop(struct qed_dev *cdev) return rc; } +void qed_hw_stop_fastpath(struct qed_dev *cdev) +{ + int i, j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFDOWN, + "Shutting down the fastpath\n"); + + qed_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + usleep_range(1000, 2000); + } + if (i == QED_HW_STOP_RETRY_LIMIT) + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK)); + + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); + + /* Need to wait 1ms to guarantee SBs are cleared */ + usleep_range(1000, 2000); + } +} + +void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) +{ + /* Re-open incoming traffic */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); +} + static int qed_reg_assert(struct qed_hwfn *hwfn, struct qed_ptt *ptt, u32 reg, bool expected) @@ -1337,3 +1391,63 @@ void qed_chain_free(struct qed_dev *cdev, p_chain->p_virt_addr, p_chain->p_phys_addr); } + +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, + u16 src_id, u16 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { + u16 min, max; + + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); + DP_NOTICE(p_hwfn, + "l2_queue id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; + + return 0; +} + +int qed_fw_vport(struct qed_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_VPORT); + max = min + RESC_NUM(p_hwfn, QED_VPORT); + DP_NOTICE(p_hwfn, + "vport id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; + + return 0; +} + +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{
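+	/* Worked example (illustrative numbers): if this PF was assigned
+	 * RESC_START(p_hwfn, QED_L2_QUEUE) == 16 and
+	 * RESC_NUM(p_hwfn, QED_L2_QUEUE) == 16, qed_fw_l2_queue() above
+	 * maps PF-relative queue 3 to absolute queue 19 and rejects any
+	 * src_id >= 16; qed_fw_vport() and this helper perform the same
+	 * relative-to-absolute translation for vports and RSS engines.
+	 */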
+	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); + max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); + DP_NOTICE(p_hwfn, + "rss_eng id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 5051af5f378e..773070d04ab8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -86,6 +86,25 @@ int qed_hw_init(struct qed_dev *cdev, */ int qed_hw_stop(struct qed_dev *cdev); +/** + * @brief qed_hw_stop_fastpath - should be called in case + * slowpath is still required for the device, + * but fastpath is not. + * + * @param cdev + * + */ +void qed_hw_stop_fastpath(struct qed_dev *cdev); + +/** + * @brief qed_hw_start_fastpath - restart fastpath traffic, + * only after qed_hw_stop_fastpath was called + * + * @param p_hwfn + * + */ +void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); + /** * @brief qed_hw_reset - * @@ -206,6 +225,45 @@ qed_chain_alloc(struct qed_dev *cdev, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain); +/** + * @brief qed_fw_l2_queue - Get absolute L2 queue ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return int + */ +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, + u16 src_id, + u16 *dst_id); + +/** + * @brief qed_fw_vport - Get absolute vport ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return int + */ +int qed_fw_vport(struct qed_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + +/** + * @brief qed_fw_rss_eng - Get absolute RSS engine ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return int + */ +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + /** * @brief Cleanup of previous driver remains prior to load * diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 27f2c005e2b0..5909823463ab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2561,6 +2561,300 @@ struct eth_conn_context { struct ustorm_eth_conn_st_ctx ustorm_st_context; }; +enum eth_filter_action { + ETH_FILTER_ACTION_REMOVE, + ETH_FILTER_ACTION_ADD, + ETH_FILTER_ACTION_REPLACE, + MAX_ETH_FILTER_ACTION +}; + +struct eth_filter_cmd { + u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; + u8 vport_id /* the vport id */; + u8 action /* filter command action: add/remove/replace */; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; +}; + +struct eth_filter_cmd_header { + u8 rx; + u8 tx; + u8 cmd_cnt; + u8 assert_on_error; + u8 reserved1[4]; +}; + +enum eth_filter_type { + ETH_FILTER_TYPE_MAC, + ETH_FILTER_TYPE_VLAN, + ETH_FILTER_TYPE_PAIR, + ETH_FILTER_TYPE_INNER_MAC, + ETH_FILTER_TYPE_INNER_VLAN, + ETH_FILTER_TYPE_INNER_PAIR, + ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR, + ETH_FILTER_TYPE_MAC_VNI_PAIR, + ETH_FILTER_TYPE_VNI, + MAX_ETH_FILTER_TYPE +}; + +enum eth_ramrod_cmd_id { + ETH_RAMROD_UNUSED, + ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, + ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, + ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, + ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + ETH_RAMROD_RX_QUEUE_STOP
/* RX Queue Stop Ramrod */, + ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, + ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */, + ETH_RAMROD_RESERVED, + ETH_RAMROD_RESERVED2, + ETH_RAMROD_RESERVED3, + ETH_RAMROD_RESERVED4, + ETH_RAMROD_RESERVED5, + ETH_RAMROD_RESERVED6, + ETH_RAMROD_RESERVED7, + ETH_RAMROD_RESERVED8, + MAX_ETH_RAMROD_CMD_ID +}; + +struct eth_vport_rss_config { + __le16 capabilities; +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9 + u8 rss_id; + u8 rss_mode; + u8 update_rss_key; + u8 update_rss_ind_table; + u8 update_rss_capabilities; + u8 tbl_size; + __le32 reserved2[2]; + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; + __le32 reserved3[2]; +}; + +enum eth_vport_rss_mode { + ETH_VPORT_RSS_MODE_DISABLED, + ETH_VPORT_RSS_MODE_REGULAR, + MAX_ETH_VPORT_RSS_MODE +}; + +struct eth_vport_rx_mode { + __le16 state; +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 + __le16 reserved2[3]; +}; + +struct eth_vport_tpa_param { + u64 reserved[2]; +}; + +struct eth_vport_tx_mode { + __le16 state; +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF +#define 
ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 + __le16 reserved2[3]; +}; + +struct rx_queue_start_ramrod_data { + __le16 rx_queue_id; + __le16 num_of_pbl_pages; + __le16 bd_max_bytes; + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 default_rss_queue_flg; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 stats_counter_id; + u8 pin_context; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + u8 pxp_st_hint; + __le16 pxp_st_index; + u8 reserved[4]; + struct regpair cqe_pbl_addr; + struct regpair bd_base; + struct regpair sge_base; +}; + +struct rx_queue_stop_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 reserved[3]; +}; + +struct rx_queue_update_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 init_sge_ring_flg; + u8 vport_id; + u8 pxp_tph_valid_sge; + u8 pxp_st_hint; + __le16 pxp_st_index; + u8 reserved[6]; + struct regpair sge_base; +}; + +struct tx_queue_start_ramrod_data { + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 tc; + u8 stats_counter_id; + __le16 qm_pq_id; + u8 flags; +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F +#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3 + u8 pin_context; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + __le16 pxp_st_index; + u8 pxp_st_hint; + u8 reserved1[3]; + __le16 queue_zone_id; + __le16 test_dup_count; + __le16 pbl_size; + struct regpair pbl_base_addr; +}; + +struct tx_queue_stop_ramrod_data { + __le16 reserved[4]; +}; + +struct vport_filter_update_ramrod_data { + struct eth_filter_cmd_header filter_cmd_hdr; + struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]; +}; + +struct vport_start_ramrod_data { + u8 vport_id; + u8 sw_fid; + __le16 mtu; + u8 drop_ttl0_en; + u8 inner_vlan_removal_en; + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + __le16 sge_buff_size; + u8 max_sges_num; + u8 tx_switching_en; + u8 anti_spoofing_en; + u8 default_vlan_en; + u8 handle_ptp_pkts; + u8 silent_vlan_removal_en; + __le16 default_vlan; + u8 untagged; + u8 reserved[7]; +}; + +struct vport_stop_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +struct vport_update_ramrod_data_cmn { + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_sge_param_flg; + __le16 sge_buff_size; + u8 max_sges_num; + u8 update_tx_switching_en_flg; + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + u8 default_vlan_en; + u8 update_default_vlan_flg; + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 reserved; +}; + +struct vport_update_ramrod_mcast { + __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; +}; + +struct vport_update_ramrod_data { + struct vport_update_ramrod_data_cmn common; + struct 
eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config; +}; + struct mstorm_eth_conn_ag_ctx { u8 byte0 /* cdu_validation */; u8 byte1 /* state */; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index f2e76024409a..2772573593a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -34,6 +34,1202 @@ #include "qed_reg_addr.h" #include "qed_sp.h" +enum qed_rss_caps { + QED_RSS_IPV4 = 0x1, + QED_RSS_IPV6 = 0x2, + QED_RSS_IPV4_TCP = 0x4, + QED_RSS_IPV6_TCP = 0x8, + QED_RSS_IPV4_UDP = 0x10, + QED_RSS_IPV6_UDP = 0x20, +}; + +/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ +#define QED_RSS_IND_TABLE_SIZE 128 +#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ + +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; + +enum qed_filter_opcode { + QED_FILTER_ADD, + QED_FILTER_REMOVE, + QED_FILTER_MOVE, + QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + QED_FILTER_FLUSH, /* Removes all filters */ +}; + +enum qed_filter_ucast_type { + QED_FILTER_MAC, + QED_FILTER_VLAN, + QED_FILTER_MAC_VLAN, + QED_FILTER_INNER_MAC, + QED_FILTER_INNER_VLAN, + QED_FILTER_INNER_PAIR, + QED_FILTER_INNER_MAC_VNI_PAIR, + QED_FILTER_MAC_VNI_PAIR, + QED_FILTER_VNI, +}; + +struct qed_filter_ucast { + enum qed_filter_opcode opcode; + enum qed_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct qed_filter_mcast { + /* MOVE is not supported for multicast */ + enum qed_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define QED_MAX_MC_ADDRS 64 + unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; +}; + +struct qed_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define QED_ACCEPT_NONE 0x01 +#define QED_ACCEPT_UCAST_MATCHED 0x02 +#define QED_ACCEPT_UCAST_UNMATCHED 0x04 +#define QED_ACCEPT_MCAST_MATCHED 0x08 +#define QED_ACCEPT_MCAST_UNMATCHED 0x10 +#define QED_ACCEPT_BCAST 0x20 +}; + +struct qed_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_approx_mcast_flg; + unsigned long bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; +}; + +#define QED_MAX_SGES_NUM 16 +#define CRC32_POLY 0x1edc6f41 + +static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, + u32 concrete_fid, + u16 opaque_fid, + u8 vport_id, + u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg) +{ + struct qed_sp_init_request_params params; + struct vport_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + u16 rx_mode = 0; + u8 abs_vport_id = 0; + + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(¶ms, 0, sizeof(params)); + params.ramrod_data_size = sizeof(*p_ramrod); + params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + 
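+	/* For orientation, a sketch of a caller's side (illustrative;
+	 * "dev_addr" is a placeholder): adding a unicast MAC on vport 0
+	 * for both Rx and Tx would use qed_filter_ucast_cmd() below with
+	 *
+	 *	struct qed_filter_ucast f = {
+	 *		.opcode = QED_FILTER_ADD,
+	 *		.type = QED_FILTER_MAC,
+	 *		.is_rx_filter = 1,
+	 *		.is_tx_filter = 1,
+	 *		.vport_to_add_to = 0,
+	 *	};
+	 *
+	 *	ether_addr_copy(f.mac, dev_addr);
+	 *	qed_filter_ucast_cmd(cdev, &f, QED_SPQ_MODE_CB, NULL);
+	 */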
qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_VPORT_START, + PROTOCOLID_ETH, + ¶ms); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->mtu = cpu_to_le16(mtu); + p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg; + p_ramrod->drop_ttl0_en = drop_ttl0_flg; + + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); + + p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); + + /* TPA related fields */ + memset(&p_ramrod->tpa_param, 0, + sizeof(struct eth_vport_tpa_param)); + + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ + p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, + concrete_fid); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_rss_params *p_params) +{ + struct eth_vport_rss_config *rss = &p_ramrod->rss_config; + u16 abs_l2_queue = 0, capabilities = 0; + int rc = 0, i; + + if (!p_params) { + p_ramrod->common.update_rss_flg = 0; + return rc; + } + + BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != + ETH_RSS_IND_TABLE_ENTRIES_NUM); + + rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); + if (rc) + return rc; + + p_ramrod->common.update_rss_flg = p_params->update_rss_config; + rss->update_rss_capabilities = p_params->update_rss_capabilities; + rss->update_rss_ind_table = p_params->update_rss_ind_table; + rss->update_rss_key = p_params->update_rss_key; + + rss->rss_mode = p_params->rss_enable ? + ETH_VPORT_RSS_MODE_REGULAR : + ETH_VPORT_RSS_MODE_DISABLED; + + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV4_UDP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, + !!(p_params->rss_caps & QED_RSS_IPV6_UDP)); + rss->tbl_size = p_params->rss_table_size_log; + + rss->capabilities = cpu_to_le16(capabilities); + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", + p_ramrod->common.update_rss_flg, + rss->rss_mode, rss->update_rss_capabilities, + capabilities, rss->update_rss_ind_table, + rss->update_rss_key); + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + rc = qed_fw_l2_queue(p_hwfn, + (u8)p_params->rss_ind_table[i], + &abs_l2_queue); + if (rc) + return rc; + + rss->indirection_table[i] = cpu_to_le16(abs_l2_queue); + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", + i, rss->indirection_table[i]); + } + + for (i = 0; i < 10; i++) + rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]); + + return rc; +} + +static void +qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_filter_accept_flags accept_flags) +{ + p_ramrod->common.update_rx_mode_flg = + accept_flags.update_rx_mode_config; + + p_ramrod->common.update_tx_mode_flg = + accept_flags.update_tx_mode_config; + + /* Set Rx mode accept flags */ + if 
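+	/* Example (sketch, illustrative values): a caller enabling TCP RSS
+	 * over IPv4/IPv6 across 8 queues would pass roughly
+	 *
+	 *	p_params->rss_enable = 1;
+	 *	p_params->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+	 *			     QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+	 *	p_params->rss_table_size_log = 7;	(2^7 = 128 entries)
+	 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+	 *		p_params->rss_ind_table[i] = i % 8;
+	 *
+	 * which qed_sp_vport_update_rss() above folds into the firmware's
+	 * eth_vport_rss_config.
+	 */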
(p_ramrod->common.update_rx_mode_flg) { + u8 accept_filter = accept_flags.rx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) || + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + p_ramrod->rx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->rx_mode.state = 0x%x\n", state); + } + + /* Set Tx mode accept flags */ + if (p_ramrod->common.update_tx_mode_flg) { + u8 accept_filter = accept_flags.tx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + p_ramrod->tx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->tx_mode.state = 0x%x\n", state); + } +} + +static void +qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_sp_vport_update_params *p_params) +{ + int i; + + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + + if (p_params->update_approx_mcast_flg) { + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)p_params->bins; + __le32 val = cpu_to_le32(p_bins[i]); + + p_ramrod->approx_mcast.bins[i] = val; + } + } +} + +static int +qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_rss_params *p_rss_params = p_params->rss_params; + struct vport_update_ramrod_data_cmn *p_cmn; + struct qed_sp_init_request_params sp_params; + struct vport_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + u8 abs_vport_id = 0; + int rc = -EINVAL; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + p_params->opaque_fid, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + /* Copy input params to ramrod according to FW struct */ + p_ramrod = &p_ent->ramrod.vport_update; + p_cmn = &p_ramrod->common; + + p_cmn->vport_id = abs_vport_id; + p_cmn->rx_active_flg = p_params->vport_active_rx_flg; + p_cmn->update_rx_active_flg = 
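+	/* Worked example of the accept-flags mapping above:
+	 * rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_BCAST
+	 * yields UCAST_DROP_ALL = 0, UCAST_ACCEPT_UNMATCHED = 0,
+	 * MCAST_DROP_ALL = 1, MCAST_ACCEPT_ALL = 0 and
+	 * BCAST_ACCEPT_ALL = 1 in the resulting Rx state word.
+	 */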
p_params->update_vport_active_rx_flg; + p_cmn->tx_active_flg = p_params->vport_active_tx_flg; + p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; + + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); + if (rc) { + /* Return spq entry which is taken in qed_sp_init_request()*/ + qed_spq_return_entry(p_hwfn, p_ent); + return rc; + } + + /* Update mcast bins for VFs, PF doesn't use this functionality */ + qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); + + qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u8 vport_id) +{ + struct qed_sp_init_request_params sp_params; + struct vport_stop_ramrod_data *p_ramrod; + struct qed_spq_entry *p_ent; + u8 abs_vport_id = 0; + int rc; + + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_VPORT_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_stop; + p_ramrod->vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_filter_accept_cmd(struct qed_dev *cdev, + u8 vport, + struct qed_filter_accept_flags accept_flags, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_sp_vport_update_params vport_update_params; + int i, rc; + + /* Prepare and send the vport rx_mode change */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = vport; + vport_update_params.accept_flags = accept_flags; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_vport_update(p_hwfn, &vport_update_params, + comp_mode, p_comp_data); + if (rc != 0) { + DP_ERR(cdev, "Update rx_mode failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Accept filter configured, flags = [Rx]%x [Tx]%x\n", + accept_flags.rx_accept_filter, + accept_flags.tx_accept_filter); + } + + return 0; +} + +static int qed_sp_release_queue_cid( + struct qed_hwfn *p_hwfn, + struct qed_hw_cid_data *p_cid_data) +{ + if (!p_cid_data->b_cid_allocated) + return 0; + + qed_cxt_release_cid(p_hwfn, p_cid_data->cid); + + p_cid_data->b_cid_allocated = false; + + return 0; +} + +static int +qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size) +{ + struct rx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + struct qed_hw_cid_data *p_rx_cid; + u16 abs_rx_q_id = 0; + u8 abs_vport_id = 0; + int rc = -EINVAL; + + /* Store information for the stop */ + p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; + p_rx_cid->cid = cid; + p_rx_cid->opaque_fid = opaque_fid; + p_rx_cid->vport_id = params->vport_id; + + rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id); + if (rc != 0) + return rc; + + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id); + if (rc != 0) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, 
vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, cid, params->queue_id, params->vport_id, + params->sb); + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + sp_params.ramrod_data_size = sizeof(*p_ramrod); + + rc = qed_sp_init_request(p_hwfn, &p_ent, + cid, opaque_fid, + ETH_RAMROD_RX_QUEUE_START, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_start; + + p_ramrod->sb_id = cpu_to_le16(params->sb); + p_ramrod->sb_index = params->sb_idx; + p_ramrod->vport_id = abs_vport_id; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; + + p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); + p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr); + p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr); + + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr); + p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; +} + +static int +qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod) +{ + struct qed_hw_cid_data *p_rx_cid; + u64 init_prod_val = 0; + u16 abs_l2_queue = 0; + u8 abs_stats_id = 0; + int rc; + + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); + if (rc != 0) + return rc; + + rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id); + if (rc != 0) + return rc; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_PRODS_OFFSET(abs_l2_queue); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)(&init_prod_val)); + + /* Allocate a CID for the queue */ + p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &p_rx_cid->cid); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return rc; + } + p_rx_cid->b_cid_allocated = true; + + rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, + opaque_fid, + p_rx_cid->cid, + params, + abs_stats_id, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size); + + if (rc != 0) + qed_sp_release_queue_cid(p_hwfn, p_rx_cid); + + return rc; +} + +static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, + bool cqe_completion) +{ + struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; + struct rx_queue_stop_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + u16 abs_rx_q_id = 0; + int rc = -EINVAL; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + p_rx_cid->cid, + p_rx_cid->opaque_fid, + ETH_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_stop; + + qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + + /* Cleaning the queue requires the completion to arrive there. + * In addition, VFs require the answer to come as eqe to PF.
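+	 *
+	 * Concretely: for a PF stopping its own queue (opaque_fid matches)
+	 * with eq_completion_only == false and cqe_completion == false,
+	 * this computes complete_cqe_flg = 1 and complete_event_flg = 0,
+	 * i.e. the completion arrives on the Rx CQE ring; passing
+	 * eq_completion_only == true flips both flags and routes the
+	 * completion through the event queue instead.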
+ */ + p_ramrod->complete_cqe_flg = + (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && + !eq_completion_only) || cqe_completion; + p_ramrod->complete_event_flg = + !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || + eq_completion_only; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); +} + +static int +qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params) +{ + struct tx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + struct qed_hw_cid_data *p_tx_cid; + u8 abs_vport_id; + int rc = -EINVAL; + u16 pq_id; + + /* Store information for the stop */ + p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; + p_tx_cid->cid = cid; + p_tx_cid->opaque_fid = opaque_fid; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, cid, + opaque_fid, + ETH_RAMROD_TX_QUEUE_START, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->tc = p_pq_params->eth.tc; + + p_ramrod->pbl_size = cpu_to_le16(pbl_size); + p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); + p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr); + + pq_id = qed_get_qm_pq(p_hwfn, + PROTOCOLID_ETH, + p_pq_params); + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell) +{ + struct qed_hw_cid_data *p_tx_cid; + union qed_qm_pq_params pq_params; + u8 abs_stats_id = 0; + int rc; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + if (rc) + return rc; + + p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; + memset(p_tx_cid, 0, sizeof(*p_tx_cid)); + memset(&pq_params, 0, sizeof(pq_params)); + + /* Allocate a CID for the queue */ + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &p_tx_cid->cid); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return rc; + } + p_tx_cid->b_cid_allocated = true; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, p_tx_cid->cid, + p_params->queue_id, p_params->vport_id, p_params->sb); + + rc = qed_sp_eth_txq_start_ramrod(p_hwfn, + opaque_fid, + p_tx_cid->cid, + p_params, + abs_stats_id, + pbl_addr, + pbl_size, + &pq_params); + + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY); + + if (rc) + qed_sp_release_queue_cid(p_hwfn, p_tx_cid); + + return rc; +} + +static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, + u16 tx_queue_id) +{ + struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + int rc = -EINVAL; + + memset(&sp_params, 0, sizeof(sp_params)); + 
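+	/* For context, a fastpath sketch of how the doorbell address
+	 * returned by qed_sp_eth_tx_queue_start() above would be used;
+	 * this assumes the DB_DEST_XCM / DB_AGG_CMD_SET selectors and the
+	 * DQ_XCM_ETH_* commands from common_hsi.h, with "bd_prod" and
+	 * "db_addr" as placeholders:
+	 *
+	 *	struct eth_db_data db = { 0 };
+	 *
+	 *	SET_FIELD(db.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+	 *	SET_FIELD(db.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+	 *	SET_FIELD(db.params, ETH_DB_DATA_AGG_VAL_SEL,
+	 *		  DQ_XCM_ETH_TX_BD_PROD_CMD);
+	 *	db.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+	 *	db.bd_prod = cpu_to_le16(bd_prod);
+	 *	writel(*(u32 *)&db, db_addr);
+	 */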
sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data); + sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + p_tx_cid->cid, + p_tx_cid->opaque_fid, + ETH_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); +} + +static enum eth_filter_action +qed_filter_action(enum qed_filter_opcode opcode) +{ + enum eth_filter_action action = MAX_ETH_FILTER_ACTION; + + switch (opcode) { + case QED_FILTER_ADD: + action = ETH_FILTER_ACTION_ADD; + break; + case QED_FILTER_REMOVE: + action = ETH_FILTER_ACTION_REMOVE; + break; + case QED_FILTER_REPLACE: + case QED_FILTER_FLUSH: + action = ETH_FILTER_ACTION_REPLACE; + break; + default: + action = MAX_ETH_FILTER_ACTION; + } + + return action; +} + +static void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, + __le16 *fw_lsb, + u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} + +static int +qed_filter_ucast_common(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + struct vport_filter_update_ramrod_data **pp_ramrod, + struct qed_spq_entry **pp_ent, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + u8 vport_to_add_to = 0, vport_to_remove_from = 0; + struct vport_filter_update_ramrod_data *p_ramrod; + struct qed_sp_init_request_params sp_params; + struct eth_filter_cmd *p_first_filter; + struct eth_filter_cmd *p_second_filter; + enum eth_filter_action action; + int rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &vport_to_remove_from); + if (rc) + return rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &vport_to_add_to); + if (rc) + return rc; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(**pp_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, pp_ent, + qed_spq_get_cid(p_hwfn), + opaque_fid, + ETH_RAMROD_FILTERS_UPDATE, + PROTOCOLID_ETH, + &sp_params); + if (rc) + return rc; + + *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; + p_ramrod = *pp_ramrod; + p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; + p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
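+		/* Worked example for qed_set_fw_mac_addr() above: the MAC
+		 * aa:bb:cc:dd:ee:ff is emitted as the little-endian words
+		 * mac_msb = 0xaabb, mac_mid = 0xccdd and mac_lsb = 0xeeff,
+		 * which is the byte order the firmware filter commands
+		 * expect.
+		 */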
1 : 0; + + switch (p_filter_cmd->opcode) { + case QED_FILTER_FLUSH: + p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break; + case QED_FILTER_MOVE: + p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; + default: + p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; + } + + p_first_filter = &p_ramrod->filter_cmds[0]; + p_second_filter = &p_ramrod->filter_cmds[1]; + + switch (p_filter_cmd->type) { + case QED_FILTER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_MAC; break; + case QED_FILTER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; + case QED_FILTER_MAC_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; + case QED_FILTER_INNER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; + case QED_FILTER_INNER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; + case QED_FILTER_INNER_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; + case QED_FILTER_INNER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; + break; + case QED_FILTER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; + case QED_FILTER_VNI: + p_first_filter->type = ETH_FILTER_TYPE_VNI; break; + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { + qed_set_fw_mac_addr(&p_first_filter->mac_msb, + &p_first_filter->mac_mid, + &p_first_filter->mac_lsb, + (u8 *)p_filter_cmd->mac); + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) + p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); + + if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_VNI)) + p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); + + if (p_filter_cmd->opcode == QED_FILTER_MOVE) { + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; + + p_first_filter->action = ETH_FILTER_ACTION_REMOVE; + + p_first_filter->vport_id = vport_to_remove_from; + + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; + } else { + action = qed_filter_action(p_filter_cmd->opcode); + + if (action == MAX_ETH_FILTER_ACTION) { + DP_NOTICE(p_hwfn, + "%d is not supported yet\n", + p_filter_cmd->opcode); + return -EINVAL; + } + + p_first_filter->action = action; + p_first_filter->vport_id = (p_filter_cmd->opcode == + QED_FILTER_REMOVE) ? 
+ vport_to_remove_from : + vport_to_add_to; + } + + return 0; +} + +static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct eth_filter_cmd_header *p_header; + int rc; + + rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, + &p_ramrod, &p_ent, + comp_mode, p_comp_data); + if (rc != 0) { + DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); + return rc; + } + p_header = &p_ramrod->filter_cmd_hdr; + p_header->assert_on_error = p_filter_cmd->assert_on_error; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc != 0) { + DP_ERR(p_hwfn, + "Unicast filter ADD command failed %d\n", + rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", + (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : + ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? + "REMOVE" : + ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? + "MOVE" : "REPLACE")), + (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : + ((p_filter_cmd->type == QED_FILTER_VLAN) ? + "VLAN" : "MAC & VLAN"), + p_ramrod->filter_cmd_hdr.cmd_cnt, + p_filter_cmd->is_rx_filter, + p_filter_cmd->is_tx_filter); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", + p_filter_cmd->vport_to_add_to, + p_filter_cmd->vport_to_remove_from, + p_filter_cmd->mac[0], + p_filter_cmd->mac[1], + p_filter_cmd->mac[2], + p_filter_cmd->mac[3], + p_filter_cmd->mac[4], + p_filter_cmd->mac[5], + p_filter_cmd->vlan); + + return 0; +} + +/******************************************************************************* + * Description: + * Calculates crc 32 on a buffer + * Note: crc32_length MUST be aligned to 8 + * Return: + ******************************************************************************/ +static u32 qed_calc_crc32c(u8 *crc32_packet, + u32 crc32_length, + u32 crc32_seed, + u8 complement) +{ + u32 byte = 0; + u32 bit = 0; + u8 msb = 0; + u8 current_byte = 0; + u32 crc32_result = crc32_seed; + + if ((!crc32_packet) || + (crc32_length == 0) || + ((crc32_length % 8) != 0)) + return crc32_result; + for (byte = 0; byte < crc32_length; byte++) { + current_byte = crc32_packet[byte]; + for (bit = 0; bit < 8; bit++) { + msb = (u8)(crc32_result >> 31); + crc32_result = crc32_result << 1; + if (msb != (0x1 & (current_byte >> bit))) { + crc32_result = crc32_result ^ CRC32_POLY; + crc32_result |= 1; /*crc32_result[0] = 1;*/ + } + } + } + return crc32_result; +} + +static inline u32 qed_crc32c_le(u32 seed, + u8 *mac, + u32 len) +{ + u32 packet_buf[2] = { 0 }; + + memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); + return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); +} + +static u8 qed_mcast_bin_from_mac(u8 *mac) +{ + u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, + mac, ETH_ALEN); + + return crc & 0xff; +} + +static int +qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct vport_update_ramrod_data *p_ramrod = NULL; + struct qed_sp_init_request_params sp_params; + struct qed_spq_entry *p_ent = NULL; + u8 abs_vport_id = 0; + int rc, i; + + if (p_filter_cmd->opcode == 
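+	/* The approximate-multicast scheme in qed_mcast_bin_from_mac()
+	 * above: each address is hashed with CRC32c and the low 8 bits
+	 * select one of ETH_MULTICAST_MAC_BINS (256) bins,
+	 *
+	 *	bin = qed_crc32c_le(seed, mac, ETH_ALEN) & 0xff;
+	 *	__set_bit(bin, bins);
+	 *
+	 * Distinct addresses may alias to the same bin, so the filter is
+	 * approximate: some unrequested multicasts can be accepted, but a
+	 * requested address is never dropped.
+	 */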
QED_FILTER_ADD) { + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &abs_vport_id); + if (rc) + return rc; + } else { + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &abs_vport_id); + if (rc) + return rc; + } + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.ramrod_data_size = sizeof(*p_ramrod); + sp_params.comp_mode = comp_mode; + sp_params.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + qed_spq_get_cid(p_hwfn), + p_hwfn->hw_info.opaque_fid, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, + &sp_params); + + if (rc) { + DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.vport_update; + p_ramrod->common.update_approx_mcast_flg = 1; + + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + memset(bins, 0, sizeof(unsigned long) * + ETH_MULTICAST_MAC_BINS_IN_REGS); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + __set_bit(bit, bins); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)bins; + struct vport_update_ramrod_mcast *approx_mcast; + + approx_mcast = &p_ramrod->approx_mcast; + approx_mcast->bins[i] = cpu_to_le32(p_bins[i]); + } + } + + p_ramrod->common.vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_filter_mcast_cmd(struct qed_dev *cdev, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + /* only ADD and REMOVE operations are supported for multi-cast */ + if ((p_filter_cmd->opcode != QED_FILTER_ADD && + (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || + (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) + return -EINVAL; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + u16 opaque_fid; + + if (rc != 0) + break; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_mcast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, + p_comp_data); + } + return rc; +} + +static int qed_filter_ucast_cmd(struct qed_dev *cdev, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u16 opaque_fid; + + if (rc != 0) + break; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_ucast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, + p_comp_data); + } + + return rc; +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -63,9 +1259,418 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, return 0; } +static int qed_start_vport(struct qed_dev *cdev, + u8 vport_id, + u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_sp_vport_start(p_hwfn, + p_hwfn->hw_info.concrete_fid, + p_hwfn->hw_info.opaque_fid, + vport_id, + mtu, + drop_ttl0_flg, + inner_vlan_removal_en_flg); + + if (rc) { + DP_ERR(cdev, "Failed to start VPORT\n"); + return rc; + } + + qed_hw_start_fastpath(p_hwfn); + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started V-PORT %d 
with MTU %d\n", + vport_id, mtu); + } + + return 0; +} + +static int qed_stop_vport(struct qed_dev *cdev, + u8 vport_id) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_sp_vport_stop(p_hwfn, + p_hwfn->hw_info.opaque_fid, + vport_id); + + if (rc) { + DP_ERR(cdev, "Failed to stop VPORT\n"); + return rc; + } + } + return 0; +} + +static int qed_update_vport(struct qed_dev *cdev, + struct qed_update_vport_params *params) +{ + struct qed_sp_vport_update_params sp_params; + struct qed_rss_params sp_rss_params; + int rc, i; + + if (!cdev) + return -ENODEV; + + memset(&sp_params, 0, sizeof(sp_params)); + memset(&sp_rss_params, 0, sizeof(sp_rss_params)); + + /* Translate protocol params into sp params */ + sp_params.vport_id = params->vport_id; + sp_params.update_vport_active_rx_flg = + params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = + params->update_vport_active_flg; + sp_params.vport_active_rx_flg = params->vport_active_flg; + sp_params.vport_active_tx_flg = params->vport_active_flg; + + /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. + * We need to re-fix the rss values per engine for CMT. + */ + if (cdev->num_hwfns > 1 && params->update_rss_flg) { + struct qed_update_vport_rss_params *rss = + ¶ms->rss_params; + int k, max = 0; + + /* Find largest entry, since it's possible RSS needs to + * be disabled [in case only 1 queue per-hwfn] + */ + for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) + max = (max > rss->rss_ind_table[k]) ? + max : rss->rss_ind_table[k]; + + /* Either fix RSS values or disable RSS */ + if (cdev->num_hwfns < max + 1) { + int divisor = (max + cdev->num_hwfns - 1) / + cdev->num_hwfns; + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "CMT - fixing RSS values (modulo %02x)\n", + divisor); + + for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) + rss->rss_ind_table[k] = + rss->rss_ind_table[k] % divisor; + } else { + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + params->update_rss_flg = 0; + } + } + + /* Now, update the RSS configuration for actual configuration */ + if (params->update_rss_flg) { + sp_rss_params.update_rss_config = 1; + sp_rss_params.rss_enable = 1; + sp_rss_params.update_rss_capabilities = 1; + sp_rss_params.update_rss_ind_table = 1; + sp_rss_params.update_rss_key = 1; + sp_rss_params.rss_caps = QED_RSS_IPV4 | + QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ + memcpy(sp_rss_params.rss_ind_table, + params->rss_params.rss_ind_table, + QED_RSS_IND_TABLE_SIZE * sizeof(u16)); + memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, + QED_RSS_KEY_SIZE * sizeof(u32)); + } + sp_params.rss_params = &sp_rss_params; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = qed_sp_vport_update(p_hwfn, &sp_params, + QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_ERR(cdev, "Failed to update VPORT\n"); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Updated V-PORT %d: active_flag %d [update %d]\n", + params->vport_id, params->vport_active_flg, + params->update_vport_active_flg); + } + + return 0; +} + +static int qed_start_rxq(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod) +{ + int rc, hwfn_index; + struct qed_hwfn 
*p_hwfn; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + /* Fix queue ID in 100g mode */ + params->queue_id /= cdev->num_hwfns; + + rc = qed_sp_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + params, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size, + pp_prod); + + if (rc) { + DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", + params->queue_id, params->rss_id, params->vport_id, + params->sb); + + return 0; +} + +static int qed_stop_rxq(struct qed_dev *cdev, + struct qed_stop_rxq_params *params) +{ + int rc, hwfn_index; + struct qed_hwfn *p_hwfn; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_sp_eth_rx_queue_stop(p_hwfn, + params->rx_queue_id / cdev->num_hwfns, + params->eq_completion_only, + false); + if (rc) { + DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); + return rc; + } + + return 0; +} + +static int qed_start_txq(struct qed_dev *cdev, + struct qed_queue_start_common_params *p_params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = p_params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + /* Fix queue ID in 100g mode */ + p_params->queue_id /= cdev->num_hwfns; + + rc = qed_sp_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, + pbl_addr, + pbl_size, + pp_doorbell); + + if (rc) { + DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", + p_params->queue_id, p_params->rss_id, p_params->vport_id, + p_params->sb); + + return 0; +} + +#define QED_HW_STOP_RETRY_LIMIT (10) +static int qed_fastpath_stop(struct qed_dev *cdev) +{ + qed_hw_stop_fastpath(cdev); + + return 0; +} + +static int qed_stop_txq(struct qed_dev *cdev, + struct qed_stop_txq_params *params) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = params->rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_sp_eth_tx_queue_stop(p_hwfn, + params->tx_queue_id / cdev->num_hwfns); + if (rc) { + DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id); + return rc; + } + + return 0; +} + +static int qed_configure_filter_rx_mode(struct qed_dev *cdev, + enum qed_filter_rx_mode_type type) +{ + struct qed_filter_accept_flags accept_flags; + + memset(&accept_flags, 0, sizeof(accept_flags)); + + accept_flags.update_rx_mode_config = 1; + accept_flags.update_tx_mode_config = 1; + accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) + accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; + else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) + accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + + return qed_filter_accept_cmd(cdev, 0, accept_flags, + QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_ucast(struct qed_dev *cdev, + struct qed_filter_ucast_params *params) +{ + struct qed_filter_ucast ucast; + + if (!params->vlan_valid && !params->mac_valid) { + DP_NOTICE( + cdev, + "Tried configuring a 
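
qed_configure_filter_rx_mode() above builds the accept masks in three tiers: matched-only, multicast-promiscuous, and fully promiscuous. A standalone sketch follows; the QED_ACCEPT_* bit values here are made up for illustration, since the real ones live in driver headers outside this patch:

#include <stdio.h>

enum {
	ACCEPT_UCAST_MATCHED   = 1 << 0,
	ACCEPT_UCAST_UNMATCHED = 1 << 1,
	ACCEPT_MCAST_MATCHED   = 1 << 2,
	ACCEPT_MCAST_UNMATCHED = 1 << 3,
	ACCEPT_BCAST           = 1 << 4,
};

enum rx_mode { RX_MODE_REGULAR, RX_MODE_MULTI_PROMISC, RX_MODE_PROMISC };

static unsigned rx_accept_filter(enum rx_mode type)
{
	/* Baseline accepted in every mode, as in the function above */
	unsigned f = ACCEPT_UCAST_MATCHED | ACCEPT_MCAST_MATCHED |
		     ACCEPT_BCAST;

	if (type == RX_MODE_PROMISC)
		f |= ACCEPT_UCAST_UNMATCHED | ACCEPT_MCAST_UNMATCHED;
	else if (type == RX_MODE_MULTI_PROMISC)
		f |= ACCEPT_MCAST_UNMATCHED;
	return f;
}

int main(void)
{
	printf("regular=0x%x promisc=0x%x\n",
	       rx_accept_filter(RX_MODE_REGULAR),
	       rx_accept_filter(RX_MODE_PROMISC));
	return 0;
}
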
unicast filter, but both MAC and VLAN are not set\n"); + return -EINVAL; + } + + memset(&ucast, 0, sizeof(ucast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + ucast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + ucast.opcode = QED_FILTER_REMOVE; + break; + case QED_FILTER_XCAST_TYPE_REPLACE: + ucast.opcode = QED_FILTER_REPLACE; + break; + default: + DP_NOTICE(cdev, "Unknown unicast filter type %d\n", + params->type); + } + + if (params->vlan_valid && params->mac_valid) { + ucast.type = QED_FILTER_MAC_VLAN; + ether_addr_copy(ucast.mac, params->mac); + ucast.vlan = params->vlan; + } else if (params->mac_valid) { + ucast.type = QED_FILTER_MAC; + ether_addr_copy(ucast.mac, params->mac); + } else { + ucast.type = QED_FILTER_VLAN; + ucast.vlan = params->vlan; + } + + ucast.is_rx_filter = true; + ucast.is_tx_filter = true; + + return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_mcast(struct qed_dev *cdev, + struct qed_filter_mcast_params *params) +{ + struct qed_filter_mcast mcast; + int i; + + memset(&mcast, 0, sizeof(mcast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + mcast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + mcast.opcode = QED_FILTER_REMOVE; + break; + default: + DP_NOTICE(cdev, "Unknown multicast filter type %d\n", + params->type); + } + + mcast.num_mc_addrs = params->num; + for (i = 0; i < mcast.num_mc_addrs; i++) + ether_addr_copy(mcast.mac[i], params->mac[i]); + + return qed_filter_mcast_cmd(cdev, &mcast, + QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter(struct qed_dev *cdev, + struct qed_filter_params *params) +{ + enum qed_filter_rx_mode_type accept_flags; + + switch (params->type) { + case QED_FILTER_TYPE_UCAST: + return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); + case QED_FILTER_TYPE_MCAST: + return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); + case QED_FILTER_TYPE_RX_MODE: + accept_flags = params->filter.accept_flags; + return qed_configure_filter_rx_mode(cdev, accept_flags); + default: + DP_NOTICE(cdev, "Unknown filter type %d\n", + (int)params->type); + return -EINVAL; + } +} + +static int qed_fp_cqe_completion(struct qed_dev *dev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe) +{ + return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], + cqe); +} + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, .fill_dev_info = &qed_fill_eth_dev_info, + .vport_start = &qed_start_vport, + .vport_stop = &qed_stop_vport, + .vport_update = &qed_update_vport, + .q_rx_start = &qed_start_rxq, + .q_rx_stop = &qed_stop_rxq, + .q_tx_start = &qed_start_txq, + .q_tx_stop = &qed_stop_txq, + .filter_config = &qed_configure_filter, + .fastpath_stop = &qed_fastpath_stop, + .eth_cqe_completion = &qed_fp_cqe_completion, }; const struct qed_eth_ops *qed_get_eth_ops(u32 version) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d0b1ff0ca3c8..1659418eec88 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -182,6 +182,8 @@ err0: int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info) { + struct qed_ptt *ptt; + memset(dev_info, 0, sizeof(struct qed_dev_info)); dev_info->num_hwfns = cdev->num_hwfns; @@ -199,6 +201,14 @@ int qed_fill_dev_info(struct qed_dev *cdev, qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); + ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if 
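
The ucast path above derives both the opcode and the filter type from the params: the MAC/VLAN validity pair picks among three filter types. A minimal model of that selection, with -1 standing in for -EINVAL:

#include <stdio.h>

enum filter_kind { FILTER_MAC, FILTER_VLAN, FILTER_MAC_VLAN };

/* Mirrors the validity checks in qed_configure_filter_ucast() above */
static int pick_filter_kind(int mac_valid, int vlan_valid,
			    enum filter_kind *kind)
{
	if (!mac_valid && !vlan_valid)
		return -1;              /* -EINVAL in the driver */
	if (mac_valid && vlan_valid)
		*kind = FILTER_MAC_VLAN;
	else if (mac_valid)
		*kind = FILTER_MAC;
	else
		*kind = FILTER_VLAN;
	return 0;
}

int main(void)
{
	enum filter_kind k;

	if (!pick_filter_kind(1, 1, &k))
		printf("kind=%d (MAC+VLAN)\n", (int)k);
	return 0;
}
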
(ptt) { + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, + &dev_info->flash_size); + + qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + } + return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 601d3f5daf13..8a5c3849bfe0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -516,6 +516,22 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn, return rc; } +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_flash_size) +{ + u32 flash_size; + + flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); + flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); + + *p_flash_size = flash_size; + + return 0; +} + int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 230c2550dc89..106d78a19937 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -89,6 +89,19 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Get the flash size value + * + * @param p_hwfn + * @param p_ptt + * @param p_flash_size - flash size in bytes to be filled. + * + * @return int - 0 - operation was successul. + */ +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_flash_size); + /** * @brief Send driver version to MFW * diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 74657d227583..31a1f1eb4f56 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -32,8 +32,35 @@ struct qed_spq_comp_cb { void *cookie; }; +/** + * @brief qed_eth_cqe_completion - handles the completion of a + * ramrod on the cqe ring + * + * @param p_hwfn + * @param cqe + * + * @return int + */ +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe); + +/** + * @file + * + * QED Slow-hwfn queue interface + */ + union ramrod_data { struct pf_start_ramrod_data pf_start; + struct rx_queue_start_ramrod_data rx_queue_start; + struct rx_queue_update_ramrod_data rx_queue_update; + struct rx_queue_stop_ramrod_data rx_queue_stop; + struct tx_queue_start_ramrod_data tx_queue_start; + struct tx_queue_stop_ramrod_data tx_queue_stop; + struct vport_start_ramrod_data vport_start; + struct vport_stop_ramrod_data vport_stop; + struct vport_update_ramrod_data vport_update; + struct vport_filter_update_ramrod_data vport_filter_update; }; #define EQ_MAX_CREDIT 0xffffffff diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f28ecb197309..7c0b8459666e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -373,6 +373,35 @@ void qed_eq_free(struct qed_hwfn *p_hwfn, kfree(p_eq); } +/*************************************************************************** +* CQE API - manipulate EQ functionality +***************************************************************************/ +static int qed_cqe_completion( + struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe, + enum protocol_type protocol) +{ + /* @@@tmp - it's possible we'll eventually want to handle some + * actual commands that can arrive here, but for now this 
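
qed_mcp_get_flash_size() above decodes a power-of-two size: the register field is an exponent biased by MCP_BYTES_PER_MBIT_SHIFT. A worked standalone example; the mask, shift, and the bias value of 17 (2^17 bytes per Mbit) are assumptions here, since those constants are defined outside this patch:

#include <stdio.h>
#include <stdint.h>

#define NVM_CFG4_FLASH_SIZE_MASK   0x7  /* assumed field layout */
#define NVM_CFG4_FLASH_SIZE_SHIFT  0
#define BYTES_PER_MBIT_SHIFT       17   /* assumed bias */

static uint32_t flash_size_bytes(uint32_t nvm_cfg4)
{
	uint32_t field = (nvm_cfg4 & NVM_CFG4_FLASH_SIZE_MASK) >>
			 NVM_CFG4_FLASH_SIZE_SHIFT;

	/* size = 2^(field + bias), as in qed_mcp_get_flash_size() above */
	return 1u << (field + BYTES_PER_MBIT_SHIFT);
}

int main(void)
{
	/* A field value of 3 would mean 2^20 bytes, i.e. a 1 MiB part */
	printf("flash size: %u bytes\n", flash_size_bytes(3));
	return 0;
}
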
is only + * used to complete the ramrod using the echo value on the cqe + */ + return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); +} + +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe) +{ + int rc; + + rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to handle RXQ CQE [cmd 0x%02x]\n", + cqe->ramrod_cmd_id); + + return rc; +} + /*************************************************************************** * Slow hwfn Queue (spq) ***************************************************************************/ diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index fbd8700f0b31..67a7b41b70aa 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -24,12 +24,132 @@ struct qed_dev_eth_info { u8 num_vlan_filters; }; +struct qed_update_vport_rss_params { + u16 rss_ind_table[128]; + u32 rss_key[10]; +}; + +struct qed_update_vport_params { + u8 vport_id; + u8 update_vport_active_flg; + u8 vport_active_flg; + u8 update_rss_flg; + struct qed_update_vport_rss_params rss_params; +}; + +struct qed_stop_rxq_params { + u8 rss_id; + u8 rx_queue_id; + u8 vport_id; + bool eq_completion_only; +}; + +struct qed_stop_txq_params { + u8 rss_id; + u8 tx_queue_id; +}; + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +enum qed_filter_xcast_params_type { + QED_FILTER_XCAST_TYPE_ADD, + QED_FILTER_XCAST_TYPE_DEL, + QED_FILTER_XCAST_TYPE_REPLACE, +}; + +struct qed_filter_ucast_params { + enum qed_filter_xcast_params_type type; + u8 vlan_valid; + u16 vlan; + u8 mac_valid; + unsigned char mac[ETH_ALEN]; +}; + +struct qed_filter_mcast_params { + enum qed_filter_xcast_params_type type; + u8 num; + unsigned char mac[64][ETH_ALEN]; +}; + +union qed_filter_type_params { + enum qed_filter_rx_mode_type accept_flags; + struct qed_filter_ucast_params ucast; + struct qed_filter_mcast_params mcast; +}; + +enum qed_filter_type { + QED_FILTER_TYPE_UCAST, + QED_FILTER_TYPE_MCAST, + QED_FILTER_TYPE_RX_MODE, + QED_MAX_FILTER_TYPES, +}; + +struct qed_filter_params { + enum qed_filter_type type; + union qed_filter_type_params filter; +}; + +struct qed_queue_start_common_params { + u8 rss_id; + u8 queue_id; + u8 vport_id; + u16 sb; + u16 sb_idx; +}; + +struct qed_eth_cb_ops { + struct qed_common_cb_ops common; +}; + struct qed_eth_ops { const struct qed_common_ops *common; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); + int (*vport_start)(struct qed_dev *cdev, + u8 vport_id, u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg); + + int (*vport_stop)(struct qed_dev *cdev, + u8 vport_id); + + int (*vport_update)(struct qed_dev *cdev, + struct qed_update_vport_params *params); + + int (*q_rx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod); + + int (*q_rx_stop)(struct qed_dev *cdev, + struct qed_stop_rxq_params *params); + + int (*q_tx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell); + + int (*q_tx_stop)(struct qed_dev *cdev, + struct qed_stop_txq_params *params); + + int (*filter_config)(struct qed_dev *cdev, + struct qed_filter_params *params); + + int (*fastpath_stop)(struct qed_dev *cdev); + + int 
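
To see how a protocol driver would feed the structures just added to qed_eth_if.h, here is a self-contained sketch using trimmed local copies of the structs (only the array sizes 128 and 10 are taken from the header above); it fills the indirection table with the usual even spread:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IND_TABLE_SIZE 128   /* matches rss_ind_table[128] above */
#define RSS_KEY_DWORDS 10    /* matches rss_key[10] above */

/* Trimmed-down local copies, for illustration only */
struct update_vport_rss_params {
	uint16_t rss_ind_table[IND_TABLE_SIZE];
	uint32_t rss_key[RSS_KEY_DWORDS];
};

struct update_vport_params {
	uint8_t vport_id;
	uint8_t update_rss_flg;
	struct update_vport_rss_params rss_params;
};

int main(void)
{
	struct update_vport_params p;
	int num_rx_queues = 4;

	memset(&p, 0, sizeof(p));
	p.vport_id = 0;
	p.update_rss_flg = 1;

	/* Classic even spread: indirection entry i -> queue i mod N */
	for (int i = 0; i < IND_TABLE_SIZE; i++)
		p.rss_params.rss_ind_table[i] = i % num_rx_queues;

	/* A real driver would also fill rss_key with random data */
	printf("entry 6 -> queue %u\n",
	       (unsigned)p.rss_params.rss_ind_table[6]);
	return 0;
}
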
(*eth_cqe_completion)(struct qed_dev *cdev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe); }; const struct qed_eth_ops *qed_get_eth_ops(u32 version); -- cgit v1.2.3-71-gd317 From cc875c2e4f34e86c2f562f18b6e917cfcc560bcb Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 26 Oct 2015 11:02:31 +0200 Subject: qed: Add link support Physical link is handled by the management Firmware. This patch lays the infrastructure for attention handling in the driver, as link change notifications arrive via async. attentions, as well the handling of such notifications. This patch also extends the API with the protocol drivers by adding registered callbacks which the protocol driver passes to qed in order to be notified of async. events originating from the FW/HW. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 20 ++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 106 ++++++++- drivers/net/ethernet/qlogic/qed/qed_int.c | 336 ++++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_l2.c | 9 + drivers/net/ethernet/qlogic/qed/qed_main.c | 211 ++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 295 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 126 ++++++++++- include/linux/qed/qed_eth_if.h | 4 + 8 files changed, 1102 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e03371d3e622..ca6cc8a7fc64 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -108,6 +108,18 @@ enum QED_FEATURE { QED_MAX_FEATURES, }; +enum QED_PORT_MODE { + QED_PORT_MODE_DE_2X40G, + QED_PORT_MODE_DE_2X50G, + QED_PORT_MODE_DE_1X100G, + QED_PORT_MODE_DE_4X10G_F, + QED_PORT_MODE_DE_4X10G_E, + QED_PORT_MODE_DE_4X20G, + QED_PORT_MODE_DE_1X40G, + QED_PORT_MODE_DE_2X25G, + QED_PORT_MODE_DE_1X25G +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -404,6 +416,13 @@ struct qed_dev { u8 protocol; #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) + /* Callbacks to protocol driver */ + union { + struct qed_common_cb_ops *common; + struct qed_eth_cb_ops *eth; + } protocol_ops; + void *ops_cookie; + const struct firmware *firmware; }; @@ -453,6 +472,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); +void qed_link_update(struct qed_hwfn *hwfn); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3d1bdbf9ade1..7fd3d78d94f1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1039,8 +1039,9 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, nvm_cfg_addr; - u32 val; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + u32 port_cfg_addr, link_temp, val, nvm_cfg_addr; + struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); @@ -1060,6 +1061,48 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, offsetof(struct nvm_cfg1_glob, pci_id); p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & 
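
Nearly every nvm_cfg read in the hunk that follows uses the same MASK-and-OFFSET idiom shown for the vendor id. A tiny standalone illustration, with fabricated field placement:

#include <stdint.h>
#include <stdio.h>

#define PORT_MODE_MASK    0x000000f0  /* illustrative values only */
#define PORT_MODE_OFFSET  4

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t offset)
{
	return (reg & mask) >> offset;
}

int main(void)
{
	uint32_t core_cfg = 0x00000030;  /* fabricated register value */

	/* Comparable to the core_cfg switch in qed_hw_get_nvm_info() */
	printf("port mode field = %u\n",
	       get_field(core_cfg, PORT_MODE_MASK, PORT_MODE_OFFSET));
	return 0;
}
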
NVM_CFG1_GLOB_VENDOR_ID_MASK; + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, core_cfg); + + core_cfg = qed_rd(p_hwfn, p_ptt, addr); + + switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> + NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", + core_cfg); + break; + } + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + offsetof(struct nvm_cfg1_func, device_id); @@ -1075,6 +1118,65 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET; } + /* Read default link configuration */ + link = &p_hwfn->mcp_info->link_input; + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, speed_cap_mask)); + link->speed.advertised_speeds = + link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + + p_hwfn->mcp_info->link_capabilities.speed_capabilities = + link->speed.advertised_speeds; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, link_settings)); + switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> + NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { + case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: + link->speed.autoneg = true; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: + link->speed.forced_speed = 1000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: + link->speed.forced_speed = 10000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: + link->speed.forced_speed = 25000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: + link->speed.forced_speed = 40000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: + link->speed.forced_speed = 50000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: + link->speed.forced_speed = 100000; + break; + default: + DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", + link_temp); + } + + link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; + link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; + link->pause.autoneg = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + link->loopback_mode = 0; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. 
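
The flow-control decode above reduces to masking a small field and normalizing each bit with !!. A standalone equivalent, with illustrative bit positions (the real NVM_CFG1_PORT_DRV_FLOW_CONTROL_* values are defined elsewhere):

#include <stdint.h>
#include <stdio.h>

#define FC_MASK     0x7   /* assumed field layout */
#define FC_OFFSET   0
#define FC_AUTONEG  0x1
#define FC_RX       0x2
#define FC_TX       0x4

struct pause_params { int autoneg, forced_rx, forced_tx; };

static void decode_pause(uint32_t link_temp, struct pause_params *p)
{
	uint32_t fc = (link_temp & FC_MASK) >> FC_OFFSET;

	/* Same !!(bit) normalization used when reading link defaults above */
	p->autoneg   = !!(fc & FC_AUTONEG);
	p->forced_rx = !!(fc & FC_RX);
	p->forced_tx = !!(fc & FC_TX);
}

int main(void)
{
	struct pause_params p;

	decode_pause(FC_RX | FC_TX, &p);
	printf("an=%d rx=%d tx=%d\n", p.autoneg, p.forced_rx, p.forced_tx);
	return 0;
}
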
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg); + /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 37d926a5fae5..2e399b6137a2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -39,10 +39,214 @@ struct qed_sb_sp_info { struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; +#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) + +#define ATTN_STATE_BITS (0xfff) +#define ATTN_BITS_MASKABLE (0x3ff) +struct qed_sb_attn_info { + /* Virtual & Physical address of the SB */ + struct atten_status_block *sb_attn; + dma_addr_t sb_phys; + + /* Last seen running index */ + u16 index; + + /* Previously asserted attentions, which are still unasserted */ + u16 known_attn; + + /* Cleanup address for the link's general hw attention */ + u32 mfw_attn_addr; +}; + +static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, + struct qed_sb_attn_info *p_sb_desc) +{ + u16 rc = 0; + u16 index; + + /* Make certain HW write took affect */ + mmiowb(); + + index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); + if (p_sb_desc->index != index) { + p_sb_desc->index = index; + rc = QED_SB_ATT_IDX; + } + + /* Make certain we got a consistent view with HW */ + mmiowb(); + + return rc; +} + +/** + * @brief qed_int_assertion - handles asserted attention bits + * + * @param p_hwfn + * @param asserted_bits newly asserted bits + * @return int + */ +static int qed_int_assertion(struct qed_hwfn *p_hwfn, + u16 asserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 igu_mask; + + /* Mask the source of the attention in the IGU */ + igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", + igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); + igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "inner known ATTN state: 0x%04x --> 0x%04x\n", + sb_attn_sw->known_attn, + sb_attn_sw->known_attn | asserted_bits); + sb_attn_sw->known_attn |= asserted_bits; + + /* Handle MCP events */ + if (asserted_bits & 0x100) { + qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); + /* Clean the MCP attention */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + sb_attn_sw->mfw_attn_addr, 0); + } + + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_SET_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + (u32)asserted_bits); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", + asserted_bits); + + return 0; +} + +/** + * @brief - handles deassertion of previously asserted attentions. 
+ * + * @param p_hwfn + * @param deasserted_bits - newly deasserted bits + * @return int + * + */ +static int qed_int_deassertion(struct qed_hwfn *p_hwfn, + u16 deasserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 aeu_mask; + + if (deasserted_bits != 0x100) + DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n"); + + /* Clear IGU indication for the deasserted bits */ + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + ~((u32)deasserted_bits)); + + /* Unmask deasserted attentions in IGU */ + aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); + + /* Clear deassertion from inner state */ + sb_attn_sw->known_attn &= ~deasserted_bits; + + return 0; +} + +static int qed_int_attentions(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; + struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; + u32 attn_bits = 0, attn_acks = 0; + u16 asserted_bits, deasserted_bits; + __le16 index; + int rc = 0; + + /* Read current attention bits/acks - safeguard against attentions + * by guaranting work on a synchronized timeframe + */ + do { + index = p_sb_attn->sb_index; + attn_bits = le32_to_cpu(p_sb_attn->atten_bits); + attn_acks = le32_to_cpu(p_sb_attn->atten_ack); + } while (index != p_sb_attn->sb_index); + p_sb_attn->sb_index = index; + + /* Attention / Deassertion are meaningful (and in correct state) + * only when they differ and consistent with known state - deassertion + * when previous attention & current ack, and assertion when current + * attention with no previous attention + */ + asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & + ~p_sb_attn_sw->known_attn; + deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & + p_sb_attn_sw->known_attn; + + if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { + DP_INFO(p_hwfn, + "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + } else if (asserted_bits == 0x100) { + DP_INFO(p_hwfn, + "MFW indication via attention\n"); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication [deassertion]\n"); + } + + if (asserted_bits) { + rc = qed_int_assertion(p_hwfn, asserted_bits); + if (rc) + return rc; + } + + if (deasserted_bits) { + rc = qed_int_deassertion(p_hwfn, deasserted_bits); + if (rc) + return rc; + } + + return rc; +} + +static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, + void __iomem *igu_addr, + u32 ack_cons) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. 
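
The assertion/deassertion derivation in qed_int_attentions() is worth checking in isolation: a bit is newly asserted when it is raised, unacked, and not already known; it is deasserted when it dropped, is acked, and was known. ATTN_STATE_BITS (0xfff) is taken from the definitions earlier in this hunk; everything else is standalone:

#include <stdint.h>
#include <stdio.h>

#define ATTN_STATE_BITS 0xfff  /* as defined for the driver above */

static void derive(uint16_t bits, uint16_t acks, uint16_t known,
		   uint16_t *asserted, uint16_t *deasserted)
{
	*asserted   = (bits  & ~acks & ATTN_STATE_BITS) & ~known;
	*deasserted = (~bits &  acks & ATTN_STATE_BITS) &  known;
}

int main(void)
{
	uint16_t a, d;

	/* MFW attention (bit 8 = 0x100) raised, not yet acked or known */
	derive(0x100, 0x000, 0x000, &a, &d);
	printf("asserted=0x%03x deasserted=0x%03x\n",
	       (unsigned)a, (unsigned)d);

	/* Later: the bit dropped and is acked while still in known_attn */
	derive(0x000, 0x100, 0x100, &a, &d);
	printf("asserted=0x%03x deasserted=0x%03x\n",
	       (unsigned)a, (unsigned)d);
	return 0;
}
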
+ */ + mmiowb(); + barrier(); +} + void qed_int_sp_dpc(unsigned long hwfn_cookie) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; struct qed_pi_info *pi_info = NULL; + struct qed_sb_attn_info *sb_attn; struct qed_sb_info *sb_info; int arr_size; u16 rc = 0; @@ -65,6 +269,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) return; } + if (!p_hwfn->p_sb_attn) { + DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); + return; + } + sb_attn = p_hwfn->p_sb_attn; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", p_hwfn, p_hwfn->my_id); @@ -87,6 +297,19 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) tmp_index, sb_info->sb_ack); } + if (!sb_attn || !sb_attn->sb_attn) { + DP_ERR( + p_hwfn->cdev, + "Attentions Status block is NULL - cannot check for new attentions!\n"); + } else { + u16 tmp_index = sb_attn->index; + + rc |= qed_attn_update_idx(p_hwfn, sb_attn); + DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, + "Attention indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_attn->index); + } + /* Check if we expect interrupts at this time. if not just ack them */ if (!(rc & QED_SB_EVENT_MASK)) { qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); @@ -100,6 +323,9 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) return; } + if (rc & QED_SB_ATT_IDX) + qed_int_attentions(p_hwfn); + if (rc & QED_SB_IDX) { int pi; @@ -111,9 +337,97 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) } } + if (sb_attn && (rc & QED_SB_ATT_IDX)) + /* This should be done before the interrupts are enabled, + * since otherwise a new attention will be generated. + */ + qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); } +static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (p_sb) { + if (p_sb->sb_attn) + dma_free_coherent(&cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + p_sb->sb_attn, + p_sb->sb_phys); + kfree(p_sb); + } +} + +static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); + + sb_info->index = 0; + sb_info->known_attn = 0; + + /* Configure Attention Status Block in IGU */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, + lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, + upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); +} + +static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *sb_virt_addr, + dma_addr_t sb_phy_addr) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + sb_info->sb_attn = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + /* Set the address of cleanup for the mcp attention */ + sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + + MISC_REG_AEU_GENERAL_ATTN_0; + + qed_int_sb_attn_setup(p_hwfn, p_ptt); +} + +static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_sb_attn_info *p_sb; + void *p_virt; + dma_addr_t p_phys = 0; + + /* SB struct */ + p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + if (!p_sb) { + DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n"); + return -ENOMEM; + } + + /* SB ring */ + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + &p_phys, GFP_KERNEL); + + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n"); + kfree(p_sb); + return -ENOMEM; + 
} + + /* Attention setup */ + p_hwfn->p_sb_attn = p_sb; + qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); + + return 0; +} + /* coalescing timeout = timeset << (timer_res + 1) */ #define QED_CAU_DEF_RX_USECS 24 #define QED_CAU_DEF_TX_USECS 48 @@ -394,6 +708,12 @@ static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn, else DP_NOTICE(p_hwfn->cdev, "Failed to setup Slow path status block - NULL pointer\n"); + + if (p_hwfn->p_sb_attn) + qed_int_sb_attn_setup(p_hwfn, p_ptt); + else + DP_NOTICE(p_hwfn->cdev, + "Failed to setup attentions status block - NULL pointer\n"); } int qed_int_register_cb(struct qed_hwfn *p_hwfn, @@ -444,7 +764,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { - u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; p_hwfn->cdev->int_mode = int_mode; switch (p_hwfn->cdev->int_mode) { @@ -484,8 +804,15 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn, /* Enable interrupt Generation */ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + /* Configure AEU signal change to produce attentions for link */ + qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + /* Flush the writes to IGU */ mmiowb(); + + /* Unmask AEU signals toward IGU */ + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); } void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, @@ -770,13 +1097,18 @@ int qed_int_alloc(struct qed_hwfn *p_hwfn, DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n"); return rc; } - + rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); + if (rc) { + DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); + return rc; + } return rc; } void qed_int_free(struct qed_hwfn *p_hwfn) { qed_int_sp_sb_free(p_hwfn); + qed_int_sb_attn_free(p_hwfn); qed_int_sp_dpc_free(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 2772573593a4..7049e4139d3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1259,6 +1259,14 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, return 0; } +static void qed_register_eth_ops(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, + void *cookie) +{ + cdev->protocol_ops.eth = ops; + cdev->ops_cookie = cookie; +} + static int qed_start_vport(struct qed_dev *cdev, u8 vport_id, u16 mtu, @@ -1661,6 +1669,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, .fill_dev_info = &qed_fill_eth_dev_info, + .register_ops = &qed_register_eth_ops, .vport_start = &qed_start_vport, .vport_stop = &qed_stop_vport, .vport_update = &qed_update_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1659418eec88..947c7af72b25 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -904,6 +904,215 @@ static u32 qed_sb_release(struct qed_dev *cdev, return rc; } +static int qed_set_link(struct qed_dev *cdev, + struct qed_link_params *params) +{ + struct qed_hwfn *hwfn; + struct qed_mcp_link_params *link_params; + struct qed_ptt *ptt; + int rc; + + if (!cdev) + return -ENODEV; + + /* The link should be set only once per PF */ + hwfn = &cdev->hwfns[0]; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + link_params = qed_mcp_get_link_params(hwfn); + if (params->override_flags & 
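
The CAU comment above gives coalescing timeout = timeset << (timer_res + 1). A worked check against the 24us/48us defaults, assuming a timer resolution of 0 (the timeset/timer_res split shown is illustrative):

#include <stdio.h>

static unsigned coalesce_usecs(unsigned timeset, unsigned timer_res)
{
	/* coalescing timeout = timeset << (timer_res + 1) */
	return timeset << (timer_res + 1);
}

int main(void)
{
	/* timeset 12 -> 24us RX default; timeset 24 -> 48us TX default */
	printf("rx=%u tx=%u\n", coalesce_usecs(12, 0), coalesce_usecs(24, 0));
	return 0;
}
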
QED_LINK_OVERRIDE_SPEED_AUTONEG) + link_params->speed.autoneg = params->autoneg; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + link_params->speed.advertised_speeds = 0; + if ((params->adv_speeds & SUPPORTED_1000baseT_Half) || + (params->adv_speeds & SUPPORTED_1000baseT_Full)) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (params->adv_speeds & SUPPORTED_10000baseKR_Full) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (params->adv_speeds & SUPPORTED_40000baseLR4_Full) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (params->adv_speeds & 0) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (params->adv_speeds & 0) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; + } + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) + link_params->speed.forced_speed = params->forced_speed; + + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_get_port_type(u32 media_type) +{ + int port_type; + + switch (media_type) { + case MEDIA_SFPP_10G_FIBER: + case MEDIA_SFP_1G_FIBER: + case MEDIA_XFP_FIBER: + case MEDIA_KR: + port_type = PORT_FIBRE; + break; + case MEDIA_DA_TWINAX: + port_type = PORT_DA; + break; + case MEDIA_BASE_T: + port_type = PORT_TP; + break; + case MEDIA_NOT_PRESENT: + port_type = PORT_NONE; + break; + case MEDIA_UNSPECIFIED: + default: + port_type = PORT_OTHER; + break; + } + return port_type; +} + +static void qed_fill_link(struct qed_hwfn *hwfn, + struct qed_link_output *if_link) +{ + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + struct qed_mcp_link_capabilities link_caps; + u32 media_type; + + memset(if_link, 0, sizeof(*if_link)); + + /* Prepare source inputs */ + memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + + /* Set the link parameters to pass to protocol driver */ + if (link.link_up) + if_link->link_up = true; + + /* TODO - at the moment assume supported and advertised speed equal */ + if_link->supported_caps = SUPPORTED_FIBRE; + if (params.speed.autoneg) + if_link->supported_caps |= SUPPORTED_Autoneg; + if (params.pause.autoneg || + (params.pause.forced_rx && params.pause.forced_tx)) + if_link->supported_caps |= SUPPORTED_Asym_Pause; + if (params.pause.autoneg || params.pause.forced_rx || + params.pause.forced_tx) + if_link->supported_caps |= SUPPORTED_Pause; + + if_link->advertised_caps = if_link->supported_caps; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + if_link->advertised_caps |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + if_link->advertised_caps |= SUPPORTED_10000baseKR_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->advertised_caps |= 0; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + if_link->advertised_caps |= 0; + + if (link_caps.speed_capabilities & + 
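
qed_set_link() above translates ethtool-style SUPPORTED_* advertisement bits into the NVM speed-capability mask. The shape of that translation, with fabricated bit values on both sides:

#include <stdint.h>
#include <stdio.h>

#define SUP_1000_FULL  (1u << 0)  /* illustrative stand-ins for the */
#define SUP_10000_KR   (1u << 1)  /* SUPPORTED_* and NVM_CFG1_...   */
#define SUP_40000_LR4  (1u << 2)  /* constants used in the patch    */

#define CAP_1G   (1u << 0)
#define CAP_10G  (1u << 1)
#define CAP_40G  (1u << 2)

static uint32_t adv_to_caps(uint32_t adv)
{
	uint32_t caps = 0;

	if (adv & SUP_1000_FULL)
		caps |= CAP_1G;
	if (adv & SUP_10000_KR)
		caps |= CAP_10G;
	if (adv & SUP_40000_LR4)
		caps |= CAP_40G;
	return caps;
}

int main(void)
{
	printf("caps=0x%x\n", adv_to_caps(SUP_10000_KR | SUP_40000_LR4));
	return 0;
}
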
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + if_link->supported_caps |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + if_link->supported_caps |= SUPPORTED_10000baseKR_Full; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->supported_caps |= SUPPORTED_40000baseLR4_Full; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->supported_caps |= 0; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + if_link->supported_caps |= 0; + + if (link.link_up) + if_link->speed = link.speed; + + /* TODO - fill duplex properly */ + if_link->duplex = DUPLEX_FULL; + qed_mcp_get_media_type(hwfn->cdev, &media_type); + if_link->port = qed_get_port_type(media_type); + + if_link->autoneg = params.speed.autoneg; + + if (params.pause.autoneg) + if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + if (params.pause.forced_rx) + if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; + if (params.pause.forced_tx) + if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + /* Link partner capabilities */ + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_1G_HD) + if_link->lp_caps |= SUPPORTED_1000baseT_Half; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_1G_FD) + if_link->lp_caps |= SUPPORTED_1000baseT_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_10G) + if_link->lp_caps |= SUPPORTED_10000baseKR_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_40G) + if_link->lp_caps |= SUPPORTED_40000baseLR4_Full; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_50G) + if_link->lp_caps |= 0; + if (link.partner_adv_speed & + QED_LINK_PARTNER_SPEED_100G) + if_link->lp_caps |= 0; + + if (link.an_complete) + if_link->lp_caps |= SUPPORTED_Autoneg; + + if (link.partner_adv_pause) + if_link->lp_caps |= SUPPORTED_Pause; + if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || + link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) + if_link->lp_caps |= SUPPORTED_Asym_Pause; +} + +static void qed_get_current_link(struct qed_dev *cdev, + struct qed_link_output *if_link) +{ + qed_fill_link(&cdev->hwfns[0], if_link); +} + +void qed_link_update(struct qed_hwfn *hwfn) +{ + void *cookie = hwfn->cdev->ops_cookie; + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + struct qed_link_output if_link; + + qed_fill_link(hwfn, &if_link); + + if (IS_LEAD_HWFN(hwfn) && cookie) + op->link_update(cookie, &if_link); +} + static int qed_drain(struct qed_dev *cdev) { struct qed_hwfn *hwfn; @@ -940,6 +1149,8 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .set_link = &qed_set_link, + .get_link = &qed_get_current_link, .drain = &qed_drain, .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 8a5c3849bfe0..20d048cdcb88 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -365,6 +365,252 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_reset) +{ + struct qed_mcp_link_state *p_link; + u32 status = 0; + + p_link = &p_hwfn->mcp_info->link_output; + memset(p_link, 0, 
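
qed_link_update() below is the payoff of the register_ops/cookie plumbing added in this patch: qed stores the protocol driver's callback table and an opaque cookie, then hands the cookie back on every link notification. A self-contained model of the pattern:

#include <stdio.h>

struct link_output { int link_up; int speed; };

struct common_cb_ops {
	void (*link_update)(void *cookie, struct link_output *link);
};

static struct common_cb_ops *g_ops;  /* cf. cdev->protocol_ops */
static void *g_cookie;               /* cf. cdev->ops_cookie   */

static void register_ops(struct common_cb_ops *ops, void *cookie)
{
	g_ops = ops;
	g_cookie = cookie;
}

static void link_update(struct link_output *link)  /* qed side */
{
	if (g_ops && g_cookie)
		g_ops->link_update(g_cookie, link);
}

/* Protocol-driver side */
static void my_link_update(void *cookie, struct link_output *link)
{
	printf("%s: link %s at %d Mb/s\n", (const char *)cookie,
	       link->link_up ? "up" : "down", link->speed);
}

int main(void)
{
	struct common_cb_ops ops = { .link_update = my_link_update };
	struct link_output link = { .link_up = 1, .speed = 40000 };

	register_ops(&ops, "eth0");
	link_update(&link);
	return 0;
}
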
sizeof(*p_link)); + if (!b_reset) { + status = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, link_status)); + DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), + "Received link update [0x%08x] from mfw [Addr 0x%x]\n", + status, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_status))); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Resetting link indications\n"); + return; + } + + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + + p_link->full_duplex = true; + switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { + case LINK_STATUS_SPEED_AND_DUPLEX_100G: + p_link->speed = 100000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_50G: + p_link->speed = 50000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_40G: + p_link->speed = 40000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_25G: + p_link->speed = 25000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_20G: + p_link->speed = 20000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_10G: + p_link->speed = 10000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: + p_link->full_duplex = false; + /* Fall-through */ + case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: + p_link->speed = 1000; + break; + default: + p_link->speed = 0; + } + + /* Correct speed according to bandwidth allocation */ + if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { + p_link->speed = p_link->speed * + p_hwfn->mcp_info->func_info.bandwidth_max / + 100; + qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_link->speed); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + } + + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); + p_link->an_complete = !!(status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); + p_link->parallel_detection = !!(status & + LINK_STATUS_PARALLEL_DETECTION_USED); + p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); + + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? + QED_LINK_PARTNER_SPEED_1G_FD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? + QED_LINK_PARTNER_SPEED_1G_HD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_10G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_20G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_40G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_50G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? 
+ QED_LINK_PARTNER_SPEED_100G : 0; + + p_link->partner_tx_flow_ctrl_en = + !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); + p_link->partner_rx_flow_ctrl_en = + !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + + switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { + case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; + break; + default: + p_link->partner_adv_pause = 0; + } + + p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + + qed_link_update(p_hwfn); +} + +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_up) +{ + struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; + u32 param = 0, reply = 0, cmd; + struct pmm_phy_cfg phy_cfg; + int rc = 0; + u32 i; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + /* Set the shmem configuration according to params */ + memset(&phy_cfg, 0, sizeof(phy_cfg)); + cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; + if (!params->speed.autoneg) + phy_cfg.speed = params->speed.forced_speed; + phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; + phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; + phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; + + /* Write the requested configuration to shmem */ + for (i = 0; i < sizeof(phy_cfg); i += 4) + qed_wr(p_hwfn, p_ptt, + p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data) + i, + ((u32 *)&phy_cfg)[i >> 2]); + + if (b_up) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", + phy_cfg.speed, + phy_cfg.pause, + phy_cfg.adv_speed, + phy_cfg.loopback_mode, + phy_cfg.feature_config_flags); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Resetting link\n"); + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", + p_hwfn->mcp_info->drv_mb_seq, + p_hwfn->mcp_info->drv_pulse_seq); + + /* Load Request */ + rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, ¶m); + + /* if mcp fails to respond we must abort */ + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* Reset the link status if needed */ + if (!b_up) + qed_mcp_handle_link_change(p_hwfn, p_ptt, true); + + return 0; +} + +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *info = p_hwfn->mcp_info; + int rc = 0; + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); + + /* Read Messages from MFW */ + qed_mcp_read_mb(p_hwfn, p_ptt); + + /* Compare current messages to old ones */ + for (i = 0; i < info->mfw_mb_length; i++) { + if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) + continue; + + found = true; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", + i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + + switch (i) { + case MFW_DRV_MSG_LINK_CHANGE: + qed_mcp_handle_link_change(p_hwfn, p_ptt, false); + break; + default: + DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); + rc = -EINVAL; + } + } 
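
Mailbox traffic to the MFW is big-endian: the ACK loop that follows byte-swaps each dword with cpu_to_be32() before qed_wr(). A userspace stand-in for that conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t to_be32(uint32_t v)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	if (probe.b[0] == 0)  /* already a big-endian host */
		return v;
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t msg = 0x11223344;

	/* Same transform the ACK loop applies before writing to shmem */
	printf("cpu 0x%08x -> wire 0x%08x\n", msg, to_be32(msg));
	return 0;
}
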
+ + /* ACK everything */ + for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { + __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); + + /* MFW expect answer in BE, so we force write in that format */ + qed_wr(p_hwfn, p_ptt, + info->mfw_mb_addr + sizeof(u32) + + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * + sizeof(u32) + i * sizeof(u32), + (__force u32)val); + } + + if (!found) { + DP_NOTICE(p_hwfn, + "Received an MFW message indication but no new message!\n"); + rc = -EINVAL; + } + + /* Copy the new mfw messages into the shadow */ + memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); + + return rc; +} + int qed_mcp_get_mfw_ver(struct qed_dev *cdev, u32 *p_mfw_ver) { @@ -389,6 +635,31 @@ int qed_mcp_get_mfw_ver(struct qed_dev *cdev, return 0; } +int qed_mcp_get_media_type(struct qed_dev *cdev, + u32 *p_media_type) +{ + struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; + struct qed_ptt *p_ptt; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + return -EBUSY; + } + + *p_media_type = MEDIA_UNSPECIFIED; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, media_type)); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct public_func *p_data, @@ -500,6 +771,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return 0; } +struct qed_mcp_link_params +*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_input; +} + +struct qed_mcp_link_state +*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_output; +} + +struct qed_mcp_link_capabilities +*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_capabilities; +} + int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 106d78a19937..dbaae586b4a7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -15,6 +15,59 @@ #include #include "qed_hsi.h" +struct qed_mcp_link_speed_params { + bool autoneg; + u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */ + u32 forced_speed; /* In Mb/s */ +}; + +struct qed_mcp_link_pause_params { + bool autoneg; + bool forced_rx; + bool forced_tx; +}; + +struct qed_mcp_link_params { + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; +}; + +struct qed_mcp_link_capabilities { + u32 speed_capabilities; +}; + +struct qed_mcp_link_state { + bool link_up; + + u32 speed; /* In Mb/s */ + bool full_duplex; + + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + +#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) +#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) +#define QED_LINK_PARTNER_SPEED_10G BIT(2) +#define QED_LINK_PARTNER_SPEED_20G BIT(3) +#define QED_LINK_PARTNER_SPEED_40G BIT(4) +#define QED_LINK_PARTNER_SPEED_50G BIT(5) +#define QED_LINK_PARTNER_SPEED_100G BIT(6) + u32 partner_adv_speed; + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + +#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1) +#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2) 
+#define QED_LINK_PARTNER_BOTH_PAUSE (3) + u8 partner_adv_pause; + + bool sfp_tx_fault; +}; + struct qed_mcp_function_info { u8 pause_on_host; @@ -44,6 +97,47 @@ struct qed_mcp_drv_version { u8 name[MCP_DRV_VER_STR_SIZE - 4]; }; +/** + * @brief - returns the link params of the hw function + * + * @param p_hwfn + * + * @returns pointer to link params + */ +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); + +/** + * @brief - return the link state of the hw function + * + * @param p_hwfn + * + * @returns pointer to link state + */ +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); + +/** + * @brief - return the link capabilities of the hw function + * + * @param p_hwfn + * + * @returns pointer to link capabilities + */ +struct qed_mcp_link_capabilities + *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); + +/** + * @brief Request the MFW to set the the link according to 'link_input'. + * + * @param p_hwfn + * @param p_ptt + * @param b_up - raise link if `true'. Reset link if `false'. + * + * @return int + */ +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_up); + /** * @brief Get the management firmware version value * @@ -55,6 +149,19 @@ struct qed_mcp_drv_version { int qed_mcp_get_mfw_ver(struct qed_dev *cdev, u32 *mfw_ver); +/** + * @brief Get media type value of the port. + * + * @param cdev - qed dev pointer + * @param mfw_ver - media type value + * + * @return int - + * 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_get_media_type(struct qed_dev *cdev, + u32 *media_type); + /** * @brief General function for sending commands to the MCP * mailbox. It acquire mutex lock for the entire @@ -142,8 +249,10 @@ struct qed_mcp_info { u32 port_addr; u16 drv_mb_seq; u16 drv_pulse_seq; + struct qed_mcp_link_params link_input; + struct qed_mcp_link_state link_output; + struct qed_mcp_link_capabilities link_capabilities; struct qed_mcp_function_info func_info; - u8 *mfw_mb_cur; u8 *mfw_mb_shadow; u16 mfw_mb_length; @@ -181,6 +290,21 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, int qed_mcp_free(struct qed_hwfn *p_hwfn); +/** + * @brief This function is called from the DPC context. After + * pointing PTT to the mfw mb, check for events sent by the MCP + * to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @param p_hwfn - HW function + * @param p_ptt - PTT required for register access + * @return int - 0 - operation + * was successul. + */ +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + /** * @brief Sends a LOAD_REQ to the MFW, and in case operation * succeed, returns whether this PF is the first on the diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 67a7b41b70aa..ab1041424013 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -111,6 +111,10 @@ struct qed_eth_ops { int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); + void (*register_ops)(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, + void *cookie); + int (*vport_start)(struct qed_dev *cdev, u8 vport_id, u16 mtu, u8 drop_ttl0_flg, -- cgit v1.2.3-71-gd317 From 9df2ed0415b13218f84262c2372323ef028310fc Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Mon, 26 Oct 2015 11:02:33 +0200 Subject: qed: Add statistics support Device statistics can be gathered on-demand. 
This adds the qed support for reading the statistics [both function and port] from the device, and adds to the public API a method for requesting the current statistics. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 14 ++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 244 +++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 3 + drivers/net/ethernet/qlogic/qed/qed_hsi.h | 30 ++++ drivers/net/ethernet/qlogic/qed/qed_l2.c | 3 + include/linux/qed/qed_eth_if.h | 3 + 6 files changed, 296 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ca6cc8a7fc64..ac17d8669b1a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -212,7 +212,20 @@ struct qed_qm_info { u32 pf_rl; }; +struct storm_stats { + u32 address; + u32 len; +}; + +struct qed_storm_stats { + struct storm_stats mstats; + struct storm_stats pstats; + struct storm_stats tstats; + struct storm_stats ustats; +}; + struct qed_fw_data { + struct fw_ver_info *fw_ver_info; const u8 *modes_tree_buf; union init_op *init_ops; const u32 *arr_data; @@ -296,6 +309,7 @@ struct qed_hwfn { /* QM init */ struct qed_qm_info qm_info; + struct qed_storm_stats storm_stats; /* Buffer for unzipping firmware data */ void *unzip_buf; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 7fd3d78d94f1..b9b7b7e6fa53 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -649,8 +649,10 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data) { - u32 load_code, param; + struct qed_storm_stats *p_stat; + u32 load_code, param, *p_address; int rc, mfw_rc, i; + u8 fw_vport = 0; rc = qed_init_fw_data(cdev, bin_fw_data); if (rc != 0) @@ -659,6 +661,10 @@ int qed_hw_init(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + rc = qed_fw_vport(p_hwfn, 0, &fw_vport); + if (rc != 0) + return rc; + /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -722,6 +728,25 @@ int qed_hw_init(struct qed_dev *cdev, } p_hwfn->hw_init_done = true; + + /* init PF stats */ + p_stat = &p_hwfn->storm_stats; + p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + + p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + + p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(fw_vport); + p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + + p_address = &p_stat->tstats.address; + *p_address = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + p_stat->tstats.len = sizeof(struct tstorm_per_port_stat); } return 0; @@ -1494,6 +1519,223 @@ void qed_chain_free(struct qed_dev *cdev, p_chain->p_phys_addr); } +static void __qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + int i, j; + + memset(stats, 0, sizeof(*stats)); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct tstorm_per_port_stat 
+		struct port_stats port_stats;
+		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		memset(&mstats, 0, sizeof(mstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+				p_hwfn->storm_stats.mstats.address,
+				p_hwfn->storm_stats.mstats.len);
+
+		memset(&ustats, 0, sizeof(ustats));
+		qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+				p_hwfn->storm_stats.ustats.address,
+				p_hwfn->storm_stats.ustats.len);
+
+		memset(&pstats, 0, sizeof(pstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+				p_hwfn->storm_stats.pstats.address,
+				p_hwfn->storm_stats.pstats.len);
+
+		memset(&tstats, 0, sizeof(tstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+				p_hwfn->storm_stats.tstats.address,
+				p_hwfn->storm_stats.tstats.len);
+
+		memset(&port_stats, 0, sizeof(port_stats));
+
+		if (p_hwfn->mcp_info)
+			qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+					p_hwfn->mcp_info->port_addr +
+					offsetof(struct public_port, stats),
+					sizeof(port_stats));
+
+		qed_ptt_release(p_hwfn, p_ptt);
+
+		stats->no_buff_discards +=
+			HILO_64_REGPAIR(mstats.no_buff_discard);
+		stats->packet_too_big_discard +=
+			HILO_64_REGPAIR(mstats.packet_too_big_discard);
+		stats->ttl0_discard +=
+			HILO_64_REGPAIR(mstats.ttl0_discard);
+		stats->tpa_coalesced_pkts +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+		stats->tpa_coalesced_events +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+		stats->tpa_aborts_num +=
+			HILO_64_REGPAIR(mstats.tpa_aborts_num);
+		stats->tpa_coalesced_bytes +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+		stats->rx_ucast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+		stats->rx_mcast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+		stats->rx_bcast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+		stats->rx_ucast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+		stats->rx_mcast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+		stats->rx_bcast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+		stats->mftag_filter_discards +=
+			HILO_64_REGPAIR(tstats.mftag_filter_discard);
+		stats->mac_filter_discards +=
+			HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+		stats->tx_ucast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+		stats->tx_mcast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+		stats->tx_bcast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+		stats->tx_ucast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+		stats->tx_mcast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+		stats->tx_bcast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+		stats->tx_err_drop_pkts +=
+			HILO_64_REGPAIR(pstats.error_drop_pkts);
+		stats->rx_64_byte_packets += port_stats.pmm.r64;
+		stats->rx_127_byte_packets += port_stats.pmm.r127;
+		stats->rx_255_byte_packets += port_stats.pmm.r255;
+		stats->rx_511_byte_packets += port_stats.pmm.r511;
+		stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+		stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+		stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+		stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+		stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+		stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+		stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+		stats->rx_crc_errors += port_stats.pmm.rfcs;
+		stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+		stats->rx_pause_frames += port_stats.pmm.rxpf;
+		stats->rx_pfc_frames += port_stats.pmm.rxpp;
+		stats->rx_align_errors += port_stats.pmm.raln;
+		stats->rx_carrier_errors += port_stats.pmm.rfcr;
+		stats->rx_oversize_packets += port_stats.pmm.rovr;
+		stats->rx_jabbers += port_stats.pmm.rjbr;
+		stats->rx_undersize_packets += port_stats.pmm.rund;
+		stats->rx_fragments += port_stats.pmm.rfrg;
+		stats->tx_64_byte_packets += port_stats.pmm.t64;
+		stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+		stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+		stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+		stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+		stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+		stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+		stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+		stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+		stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+		stats->tx_pause_frames += port_stats.pmm.txpf;
+		stats->tx_pfc_frames += port_stats.pmm.txpp;
+		stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+		stats->tx_total_collisions += port_stats.pmm.tncl;
+		stats->rx_mac_bytes += port_stats.pmm.rbyte;
+		stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+		stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+		stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+		stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+		stats->tx_mac_bytes += port_stats.pmm.tbyte;
+		stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+		stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+		stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+		stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+		for (j = 0; j < 8; j++) {
+			stats->brb_truncates += port_stats.brb.brb_truncate[j];
+			stats->brb_discards += port_stats.brb.brb_discard[j];
+		}
+	}
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+			 struct qed_eth_stats *stats)
+{
+	u32 i;
+
+	if (!cdev) {
+		memset(stats, 0, sizeof(*stats));
+		return;
+	}
+
+	__qed_get_vport_stats(cdev, stats);
+
+	if (!cdev->reset_stats)
+		return;
+
+	/* Reduce the statistics baseline */
+	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct eth_mstorm_per_queue_stat mstats;
+		struct eth_ustorm_per_queue_stat ustats;
+		struct eth_pstorm_per_queue_stat pstats;
+		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		memset(&mstats, 0, sizeof(mstats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.mstats.address,
+			      &mstats,
+			      p_hwfn->storm_stats.mstats.len);
+
+		memset(&ustats, 0, sizeof(ustats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.ustats.address,
+			      &ustats,
+			      p_hwfn->storm_stats.ustats.len);
+
+		memset(&pstats, 0, sizeof(pstats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.pstats.address,
+			      &pstats,
+			      p_hwfn->storm_stats.pstats.len);
+
+		qed_ptt_release(p_hwfn, p_ptt);
+	}
+
+	/* PORT statistics are not necessarily reset, so we need to
+	 * read and create a baseline for future statistics.
+	 */
+	if (!cdev->reset_stats)
+		DP_INFO(cdev, "Reset stats not allocated\n");
+	else
+		__qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
 		    u16 src_id, u16 *dst_id)
 {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 773070d04ab8..e29a3ba6c8b0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -156,6 +156,9 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
  */
 void qed_ptt_release(struct qed_hwfn *p_hwfn,
 		     struct qed_ptt *p_ptt);
+void qed_get_vport_stats(struct qed_dev *cdev,
+			 struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
 
 enum qed_dmae_address_type_t {
 	QED_DMAE_ADDRESS_HOST_VIRT,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 5909823463ab..b2f8e854dfd1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -355,6 +355,36 @@ struct core_conn_context {
 	struct regpair ustorm_st_padding[2] /* padding */;
 };
 
+struct eth_mstorm_per_queue_stat {
+	struct regpair ttl0_discard;
+	struct regpair packet_too_big_discard;
+	struct regpair no_buff_discard;
+	struct regpair not_active_discard;
+	struct regpair tpa_coalesced_pkts;
+	struct regpair tpa_coalesced_events;
+	struct regpair tpa_aborts_num;
+	struct regpair tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+	struct regpair sent_ucast_bytes;
+	struct regpair sent_mcast_bytes;
+	struct regpair sent_bcast_bytes;
+	struct regpair sent_ucast_pkts;
+	struct regpair sent_mcast_pkts;
+	struct regpair sent_bcast_pkts;
+	struct regpair error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+	struct regpair rcv_ucast_bytes;
+	struct regpair rcv_mcast_bytes;
+	struct regpair rcv_bcast_bytes;
+	struct regpair rcv_ucast_pkts;
+	struct regpair rcv_mcast_pkts;
+	struct regpair rcv_bcast_pkts;
+};
+
 /* Event Ring Next Page Address */
 struct event_ring_next_addr {
 	struct regpair addr /* Next Page Address */;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 7049e4139d3c..f72036a2ef5b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1298,6 +1298,8 @@ static int qed_start_vport(struct qed_dev *cdev,
 			   vport_id, mtu);
 	}
 
+	qed_reset_vport_stats(cdev);
+
 	return 0;
 }
 
@@ -1680,6 +1682,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
 	.filter_config = &qed_configure_filter,
 	.fastpath_stop = &qed_fastpath_stop,
 	.eth_cqe_completion = &qed_fp_cqe_completion,
+	.get_vport_stats = &qed_get_vport_stats,
 };
 
 const struct qed_eth_ops *qed_get_eth_ops(u32 version)
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index ab1041424013..81ab178e31c1 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -154,6 +154,9 @@ struct qed_eth_ops {
 	int (*eth_cqe_completion)(struct qed_dev *cdev,
 				  u8 rss_id,
 				  struct eth_slow_path_rx_cqe *cqe);
+
+	void (*get_vport_stats)(struct qed_dev *cdev,
+				struct qed_eth_stats *stats);
 };
 
 const struct qed_eth_ops *qed_get_eth_ops(u32 version);
--
cgit v1.2.3-71-gd317
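The statistics flow above leans on two small mechanics: each storm counter is
a struct regpair whose two 32-bit halves HILO_64_REGPAIR() folds into one
64-bit value, and qed_get_vport_stats() subtracts the snapshot taken by
qed_reset_vport_stats() by walking qed_eth_stats as a flat array of u64.
Below is a minimal userspace sketch of both mechanics; demo_stats is a
hypothetical two-field stand-in for the much larger qed_eth_stats, and the
regpair layout assumes the driver's usual lo/hi pair.

/* Standalone sketch (not driver code) of regpair folding and the
 * baseline-subtraction walk used by qed_get_vport_stats().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct regpair {
	uint32_t lo;	/* lower 32 bits of the counter */
	uint32_t hi;	/* upper 32 bits of the counter */
};

#define HILO_64_REGPAIR(rp) ((((uint64_t)(rp).hi) << 32) + (rp).lo)

struct demo_stats {	/* hypothetical stand-in for qed_eth_stats */
	uint64_t rx_ucast_pkts;
	uint64_t tx_ucast_pkts;
};

int main(void)
{
	struct regpair rx_ucast = { .lo = 0x10, .hi = 0x2 };
	struct demo_stats now = { 0 }, baseline = { .rx_ucast_pkts = 4 };
	uint64_t *n = (uint64_t *)&now;
	uint64_t *b = (uint64_t *)&baseline;
	size_t i;

	now.rx_ucast_pkts = HILO_64_REGPAIR(rx_ucast);	/* 0x200000010 */
	now.tx_ucast_pkts = 7;

	/* same walk as the "Reduce the statistics baseline" loop above */
	for (i = 0; i < sizeof(now) / sizeof(uint64_t); i++)
		n[i] -= b[i];

	printf("rx_ucast_pkts=%llu tx_ucast_pkts=%llu\n",
	       (unsigned long long)now.rx_ucast_pkts,
	       (unsigned long long)now.tx_ucast_pkts);
	return 0;
}

The baseline dance exists because the per-port MAC counters cannot simply be
zeroed by the driver: qed_reset_vport_stats() rewrites only the per-vport
storm counters to zero, then snapshots everything into cdev->reset_stats so
subsequent reads can subtract the snapshot from the still-running port
counters.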