// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"

#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true, \
	.op64bit = false, \
	.msix_irqs = 4U, \
	.irq_mask = ~0U, \
	.vecs = HW_ATL_A0_RSS_MAX, \
	.tcs_max = HW_ATL_A0_TC_MAX, \
	.rxd_alignment = 1U, \
	.rxd_size = HW_ATL_A0_RXD_SIZE, \
	.rxds_max = HW_ATL_A0_MAX_RXD, \
	.rxds_min = HW_ATL_A0_MIN_RXD, \
	.txd_alignment = 1U, \
	.txd_size = HW_ATL_A0_TXD_SIZE, \
	.txds_max = HW_ATL_A0_MAX_TXD, \
	.txds_min = HW_ATL_A0_MIN_TXD, \
	.txhwb_alignment = 4096U, \
	.tx_rings = HW_ATL_A0_TX_RINGS, \
	.rx_rings = HW_ATL_A0_RX_RINGS, \
	.hw_features = NETIF_F_HW_CSUM | \
			NETIF_F_RXHASH | \
			NETIF_F_RXCSUM | \
			NETIF_F_SG | \
			NETIF_F_TSO | \
			NETIF_F_NTUPLE | \
			NETIF_F_HW_VLAN_CTAG_FILTER, \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true, \
	.mtu = HW_ATL_A0_MTU_JUMBO, \
	.mac_regs_count = 88, \
	.hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;
	u32 val;

	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);

	HW_ATL_FLUSH();
	hw_atl_glb_soft_res_set(self, 1);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get,
					self, val, val == 0,
					1000U, 10000U);
	if (err < 0)
		goto err_exit;

	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
	hw_atl_itr_res_irq_set(self, 1U);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get,
					self, val, val == 0,
					1000U, 10000U);
	if (err < 0)
		goto err_exit;

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
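/* Note: A0 is configured with a single traffic class (tc 0).  The
 * buffer thresholds programmed in hw_atl_a0_hw_qos_set() below are 66%
 * (high) and 50% (low) of the per-TC packet buffer; buff_size is in KB,
 * so the 1024 / 32 factor converts it to the 32-byte units the
 * threshold registers appear to expect.
 */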
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
	bool is_rx_flow_control = false;
	unsigned int i_priority = 0U;
	u32 buff_size = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);

	/* Tx buf size */
	buff_size = HW_ATL_A0_TXBUF_MAX;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 50U) /
						   100U, tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
	buff_size = HW_ATL_A0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}
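/* The RSS key is programmed through an indirect register interface:
 * each iteration writes data + address, kicks the wr_en bit and polls
 * until hardware clears it.  Key words are written in reverse order
 * and byte-swapped, matching the layout the A0 filter block consumes.
 */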
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int addr = 0U;
	unsigned int i = 0U;
	int err = 0;
	u32 val;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u8 *indirection_table = rss_params->indirection_table;
	u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
		   HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
	int err = 0;
	u32 i = 0U;
	u32 val;

	memset(bitary, 0, sizeof(bitary));

	/* Pack the 3-bit queue indices back to back: entry i lands at
	 * bit offset i * 3, which may straddle two u16 words, hence the
	 * u32 access into the u16 array.
	 */
	for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			((i * 3U) & 0xFU));
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	/* TX checksums offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	return aq_hw_err_from_flags(self);
}
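/* Note: the three THM values below look like TCP flag masks applied to
 * the first/middle/last segments that LSO generates, with FIN/PSH
 * masked off everywhere but the last segment.  This is a reading of
 * the 0x0FF6/0x0F7F constants, not something documented in this file.
 */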
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					  0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_A0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err = 0;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
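/* The IGCR table in hw_init below selects the interrupt global control
 * value per IRQ type; column [0] is used with a single vector, column
 * [1] when multiple vectors are configured.
 */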
static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
	};
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	int err = 0;

	hw_atl_a0_hw_init_tx_path(self);
	hw_atl_a0_hw_init_rx_path(self);

	hw_atl_a0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

	hw_atl_a0_hw_qos_set(self);
	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
					[(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
				   ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
				   ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
				   ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return 0;
}

static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int frag_count = 0U;
	unsigned int pkt_len = 0U;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_A0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_gso_tcp) {
			txd->ctl |= (buff->len_l3 << 31) |
				    (buff->len_l2 << 24) |
				    HW_ATL_A0_TXD_CTL_CMD_TCP |
				    HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				     (buff->len_l4 << 8) |
				     (buff->len_l3 >> 1);

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;

			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
		} else {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
				     ((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
				is_gso = false;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_a0_hw_tx_ring_tail_update(self, ring);

	return aq_hw_err_from_flags(self);
}
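/* Ring init below follows the usual pattern: the ring is disabled
 * first, then base address, length and buffer sizes are programmed,
 * and finally the ring's interrupt is mapped to its vector.
 */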
static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw,
						  aq_ring->idx);

	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      aq_ring->frame_max / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);

	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
	     sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_A0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
	int err = 0;

	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
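/* Receive processing includes an A0-specific recovery path: when a
 * descriptor is not yet done but the Rx DMA descriptor status register
 * has bit 4 set (treated here as an error indication), the ring is
 * reset and re-enabled in place before processing continues.
 */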
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	for (; ring->hw_head != ring->sw_tail;
	     ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
			if ((1U << 4) &
			    hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
				hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
				hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
				hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
				hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
			}

			if (ring->hw_head ||
			    (hw_atl_rdm_rx_desc_head_ptr_get(self,
							     ring->idx) < 2U)) {
				break;
			} else if (!(rxd_wb->status & 0x1U)) {
				struct hw_atl_rxd_wb_s *rxd_wb1 =
					(struct hw_atl_rxd_wb_s *)
					(&ring->dx_ring[(1U) *
							HW_ATL_A0_RXD_SIZE]);

				if ((rxd_wb1->status & 0x1U)) {
					rxd_wb->pkt_len = 1514U;
					rxd_wb->status = 3U;
				} else {
					break;
				}
			}
		}

		buff = &ring->buff_ring[ring->hw_head];

		if (0x3U != (rxd_wb->status & 0x3U))
			rxd_wb->status |= 4;

		is_err = (0x0000001CU & rxd_wb->status);
		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;
			}
		}

		is_err &= ~0x18U;
		is_err &= ~0x04U;

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */
			buff->is_error = 1U;
		} else {
			if (self->aq_nic_cfg->is_rss) {
				/* last 4 byte */
				u16 rss_type = rxd_wb->type & 0xFU;

				if (rss_type && rss_type < 0x8U) {
					buff->is_hash_l4 = (rss_type == 0x4 ||
							    rss_type == 0x5);
					buff->rss_hash = rxd_wb->rss_hash;
				}
			}

			if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
				buff->len = rxd_wb->pkt_len %
					    ring->frame_max;
				buff->len = buff->len ?
					    buff->len : ring->frame_max;
				buff->next = 0U;
				buff->is_eop = 1U;
			} else {
				/* jumbo */
				buff->next = aq_ring_next_dx(ring,
							     ring->hw_head);
				++ring->stats.rx.jumbo_packets;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
				      (1U << HW_ATL_A0_ERR_INT));

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
		atomic_inc(&self->dpc);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);

	return aq_hw_err_from_flags(self);
}
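/* IS_FILTER_ENABLED maps an IFF_* flag present in packet_filter to the
 * 1U/0U value the filter-enable registers expect; it is #undef'd right
 * after hw_atl_a0_hw_packet_filter_set.
 */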
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int i = 0U;

	hw_atl_rpfl2promiscuous_mode_en_set(self,
					    IS_FILTER_ENABLED(IFF_PROMISC));
	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

	for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled &&
					    (i <= cfg->mc_list_count)) ? 1U : 0U,
					   i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_HW_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
							l,
							HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
							h,
							HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled),
					   HW_ATL_A0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_rx;

	if (self->aq_nic_cfg->itr) {
		if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
			u32 itr_ = (self->aq_nic_cfg->itr >> 1);

			itr_ = min(AQ_CFG_IRQ_MASK, itr_);

			itr_rx = 0x80000000U | (itr_ << 0x10);
		} else {
			u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

			if (n < self->aq_link_status.mbps) {
				itr_rx = 0U;
			} else {
				static unsigned int hw_timers_tbl_[] = {
					0x01CU, /* 10Gbit */
					0x039U, /* 5Gbit */
					0x039U, /* 5Gbit 5GS */
					0x073U, /* 2.5Gbit */
					0x120U, /* 1Gbit */
					0x1FFU, /* 100Mbit */
				};

				unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

				itr_rx = 0x80000000U |
					 (hw_timers_tbl_[speed_index] << 0x10U);
			}

			aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
			aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
		}
	} else {
		itr_rx = 0U;
	}

	for (i = HW_ATL_A0_RINGS_MAX; i--;)
		hw_atl_reg_irq_thr_set(self, itr_rx, i);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}
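/* L3/L4 flow filters: an IPv6 rule spans HW_ATL_RX_CNT_REG_ADDR_IPV6
 * consecutive filter slots, so clearing walks every slot the rule
 * occupied, while an IPv4 rule touches a single location.
 */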
static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
				    struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	if (!data->is_ipv6) {
		hw_atl_rpfl3l4_cmd_clear(self, location);
		hw_atl_rpf_l4_spd_set(self, 0U, location);
		hw_atl_rpf_l4_dpd_set(self, 0U, location);
		hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
	} else {
		int i;

		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			hw_atl_rpfl3l4_cmd_clear(self, location + i);
			hw_atl_rpf_l4_spd_set(self, 0U, location + i);
			hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
		}
		hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
				  struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	hw_atl_a0_hw_fl3l4_clear(self, data);

	if (data->cmd) {
		if (!data->is_ipv6) {
			hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
							  location,
							  data->ip_dst[0]);
			hw_atl_rpfl3l4_ipv4_src_addr_set(self,
							 location,
							 data->ip_src[0]);
		} else {
			hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
							  location,
							  data->ip_dst);
			hw_atl_rpfl3l4_ipv6_src_addr_set(self,
							 location,
							 data->ip_src);
		}
	}
	hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
	hw_atl_rpf_l4_spd_set(self, data->p_src, location);
	hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

	return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_a0 = {
	.hw_soft_reset = hw_atl_utils_soft_reset,
	.hw_prepare = hw_atl_utils_initfw,
	.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
	.hw_init = hw_atl_a0_hw_init,
	.hw_reset = hw_atl_a0_hw_reset,
	.hw_start = hw_atl_a0_hw_start,
	.hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
	.hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
	.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
	.hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
	.hw_stop = hw_atl_a0_hw_stop,

	.hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,

	.hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
	.hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,

	.hw_irq_enable = hw_atl_a0_hw_irq_enable,
	.hw_irq_disable = hw_atl_a0_hw_irq_disable,
	.hw_irq_read = hw_atl_a0_hw_irq_read,

	.hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
	.hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
	.hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
	.hw_filter_l3l4_set = hw_atl_a0_hw_fl3l4_set,
	.hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
	.hw_rss_set = hw_atl_a0_hw_rss_set,
	.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
	.hw_get_regs = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version = hw_atl_utils_get_fw_version,
};