hw_atl_b0.c (48342B)
1// SPDX-License-Identifier: GPL-2.0-only 2/* Atlantic Network Driver 3 * 4 * Copyright (C) 2014-2019 aQuantia Corporation 5 * Copyright (C) 2019-2020 Marvell International Ltd. 6 */ 7 8/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */ 9 10#include "../aq_hw.h" 11#include "../aq_hw_utils.h" 12#include "../aq_ring.h" 13#include "../aq_nic.h" 14#include "../aq_phy.h" 15#include "hw_atl_b0.h" 16#include "hw_atl_utils.h" 17#include "hw_atl_llh.h" 18#include "hw_atl_b0_internal.h" 19#include "hw_atl_llh_internal.h" 20 21#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ 22 .is_64_dma = true, \ 23 .op64bit = false, \ 24 .msix_irqs = 8U, \ 25 .irq_mask = ~0U, \ 26 .vecs = HW_ATL_B0_RSS_MAX, \ 27 .tcs_max = HW_ATL_B0_TC_MAX, \ 28 .rxd_alignment = 1U, \ 29 .rxd_size = HW_ATL_B0_RXD_SIZE, \ 30 .rxds_max = HW_ATL_B0_MAX_RXD, \ 31 .rxds_min = HW_ATL_B0_MIN_RXD, \ 32 .txd_alignment = 1U, \ 33 .txd_size = HW_ATL_B0_TXD_SIZE, \ 34 .txds_max = HW_ATL_B0_MAX_TXD, \ 35 .txds_min = HW_ATL_B0_MIN_TXD, \ 36 .txhwb_alignment = 4096U, \ 37 .tx_rings = HW_ATL_B0_TX_RINGS, \ 38 .rx_rings = HW_ATL_B0_RX_RINGS, \ 39 .hw_features = NETIF_F_HW_CSUM | \ 40 NETIF_F_RXCSUM | \ 41 NETIF_F_RXHASH | \ 42 NETIF_F_SG | \ 43 NETIF_F_TSO | \ 44 NETIF_F_TSO6 | \ 45 NETIF_F_LRO | \ 46 NETIF_F_NTUPLE | \ 47 NETIF_F_HW_VLAN_CTAG_FILTER | \ 48 NETIF_F_HW_VLAN_CTAG_RX | \ 49 NETIF_F_HW_VLAN_CTAG_TX | \ 50 NETIF_F_GSO_UDP_L4 | \ 51 NETIF_F_GSO_PARTIAL | \ 52 NETIF_F_HW_TC, \ 53 .hw_priv_flags = IFF_UNICAST_FLT, \ 54 .flow_control = true, \ 55 .mtu = HW_ATL_B0_MTU_JUMBO, \ 56 .mac_regs_count = 88, \ 57 .hw_alive_check_addr = 0x10U 58 59const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { 60 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 61 .media_type = AQ_HW_MEDIA_TYPE_FIBRE, 62 .link_speed_msk = AQ_NIC_RATE_10G | 63 AQ_NIC_RATE_5G | 64 AQ_NIC_RATE_2G5 | 65 AQ_NIC_RATE_1G | 66 AQ_NIC_RATE_100M, 67}; 68 69const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { 70 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 71 .media_type = AQ_HW_MEDIA_TYPE_TP, 72 .link_speed_msk = AQ_NIC_RATE_10G | 73 AQ_NIC_RATE_5G | 74 AQ_NIC_RATE_2G5 | 75 AQ_NIC_RATE_1G | 76 AQ_NIC_RATE_100M, 77}; 78 79const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { 80 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 81 .media_type = AQ_HW_MEDIA_TYPE_TP, 82 .link_speed_msk = AQ_NIC_RATE_5G | 83 AQ_NIC_RATE_2G5 | 84 AQ_NIC_RATE_1G | 85 AQ_NIC_RATE_100M, 86}; 87 88const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { 89 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 90 .media_type = AQ_HW_MEDIA_TYPE_TP, 91 .link_speed_msk = AQ_NIC_RATE_2G5 | 92 AQ_NIC_RATE_1G | 93 AQ_NIC_RATE_100M, 94}; 95 96const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = { 97 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 98 .media_type = AQ_HW_MEDIA_TYPE_TP, 99 .link_speed_msk = AQ_NIC_RATE_5G | 100 AQ_NIC_RATE_2G5 | 101 AQ_NIC_RATE_1G | 102 AQ_NIC_RATE_100M, 103 .quirks = AQ_NIC_QUIRK_BAD_PTP, 104}; 105 106const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = { 107 DEFAULT_B0_BOARD_BASIC_CAPABILITIES, 108 .media_type = AQ_HW_MEDIA_TYPE_TP, 109 .link_speed_msk = AQ_NIC_RATE_2G5 | 110 AQ_NIC_RATE_1G | 111 AQ_NIC_RATE_100M, 112 .quirks = AQ_NIC_QUIRK_BAD_PTP, 113}; 114 115static int hw_atl_b0_hw_reset(struct aq_hw_s *self) 116{ 117 int err = 0; 118 119 err = hw_atl_utils_soft_reset(self); 120 if (err) 121 return err; 122 123 self->aq_fw_ops->set_state(self, MPI_RESET); 124 125 err = aq_hw_err_from_flags(self); 126 127 return err; 128} 129 130int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) 131{ 132 hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); 133 134 return 0; 135} 136 137static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self) 138{ 139 /* Init TC2 for PTP_TX */ 140 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE, 141 AQ_HW_PTP_TC); 142 143 /* Init TC2 for PTP_RX */ 144 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE, 145 AQ_HW_PTP_TC); 146 /* No flow control for PTP */ 147 hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC); 148 149 return aq_hw_err_from_flags(self); 150} 151 152static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) 153{ 154 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 155 u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX; 156 u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX; 157 unsigned int prio = 0U; 158 u32 tc = 0U; 159 160 if (cfg->is_ptp) { 161 tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE; 162 rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE; 163 } 164 165 /* TPS Descriptor rate init */ 166 hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); 167 hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); 168 169 /* TPS VM init */ 170 hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); 171 172 tx_buff_size /= cfg->tcs; 173 rx_buff_size /= cfg->tcs; 174 for (tc = 0; tc < cfg->tcs; tc++) { 175 u32 threshold = 0U; 176 177 /* Tx buf size TC0 */ 178 hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); 179 180 threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; 181 hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); 182 183 threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; 184 hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); 185 186 /* QoS Rx buf size per TC */ 187 hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); 188 189 threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; 190 hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); 191 192 threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; 193 hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); 194 195 hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); 196 } 197 198 if (cfg->is_ptp) 199 hw_atl_b0_tc_ptp_set(self); 200 201 /* QoS 802.1p priority -> TC mapping */ 202 for (prio = 0; prio < 8; ++prio) 203 hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio, 204 cfg->prio_tc_map[prio]); 205 206 return aq_hw_err_from_flags(self); 207} 208 209int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, 210 struct aq_rss_parameters *rss_params) 211{ 212 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 213 unsigned int addr = 0U; 214 unsigned int i = 0U; 215 int err = 0; 216 u32 val; 217 218 for (i = 10, addr = 0U; i--; ++addr) { 219 u32 key_data = cfg->is_rss ? 220 __swab32(rss_params->hash_secret_key[i]) : 0U; 221 hw_atl_rpf_rss_key_wr_data_set(self, key_data); 222 hw_atl_rpf_rss_key_addr_set(self, addr); 223 hw_atl_rpf_rss_key_wr_en_set(self, 1U); 224 err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get, 225 self, val, val == 0, 226 1000U, 10000U); 227 if (err < 0) 228 goto err_exit; 229 } 230 231 err = aq_hw_err_from_flags(self); 232 233err_exit: 234 return err; 235} 236 237static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, 238 struct aq_rss_parameters *rss_params) 239{ 240 u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); 241 u8 *indirection_table = rss_params->indirection_table; 242 u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX * 243 HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; 244 int err = 0; 245 u32 i = 0U; 246 u32 val; 247 248 memset(bitary, 0, sizeof(bitary)); 249 250 for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) { 251 (*(u32 *)(bitary + ((i * 3U) / 16U))) |= 252 ((indirection_table[i] % num_rss_queues) << 253 ((i * 3U) & 0xFU)); 254 } 255 256 for (i = ARRAY_SIZE(bitary); i--;) { 257 hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); 258 hw_atl_rpf_rss_redir_tbl_addr_set(self, i); 259 hw_atl_rpf_rss_redir_wr_en_set(self, 1U); 260 err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get, 261 self, val, val == 0, 262 1000U, 10000U); 263 if (err < 0) 264 goto err_exit; 265 } 266 267 err = aq_hw_err_from_flags(self); 268 269err_exit: 270 return err; 271} 272 273int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, 274 struct aq_nic_cfg_s *aq_nic_cfg) 275{ 276 u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM); 277 unsigned int i; 278 279 /* TX checksums offloads*/ 280 hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1); 281 hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1); 282 283 /* RX checksums offloads*/ 284 hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum); 285 hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum); 286 287 /* LSO offloads*/ 288 hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 289 290 /* Outer VLAN tag offload */ 291 hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U); 292 293 /* LRO offloads */ 294 { 295 unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U : 296 ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U : 297 ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0)); 298 299 for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++) 300 hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i); 301 302 hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU); 303 hw_atl_rpo_lro_inactive_interval_set(self, 0); 304 /* the LRO timebase divider is 5 uS (0x61a), 305 * which is multiplied by 50(0x32) 306 * to get a maximum coalescing interval of 250 uS, 307 * which is the default value 308 */ 309 hw_atl_rpo_lro_max_coalescing_interval_set(self, 50); 310 311 hw_atl_rpo_lro_qsessions_lim_set(self, 1U); 312 313 hw_atl_rpo_lro_total_desc_lim_set(self, 2U); 314 315 hw_atl_rpo_lro_patch_optimization_en_set(self, 1U); 316 317 hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U); 318 319 hw_atl_rpo_lro_pkt_lim_set(self, 1U); 320 321 hw_atl_rpo_lro_en_set(self, 322 aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 323 hw_atl_itr_rsc_en_set(self, 324 aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 325 326 hw_atl_itr_rsc_delay_set(self, 1U); 327 } 328 329 return aq_hw_err_from_flags(self); 330} 331 332static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) 333{ 334 static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1; 335 /* Scale factor is based on the number of bits in fractional portion */ 336 static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); 337 static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> 338 HW_ATL_TPS_DESC_RATE_Y_SHIFT; 339 const u32 link_speed = self->aq_link_status.mbps; 340 struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; 341 unsigned long num_min_rated_tcs = 0; 342 u32 tc_weight[AQ_CFG_TCS_MAX]; 343 u32 fixed_max_credit; 344 u8 min_rate_msk = 0; 345 u32 sum_weight = 0; 346 int tc; 347 348 /* By default max_credit is based upon MTU (in unit of 64b) */ 349 fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; 350 351 if (link_speed) { 352 min_rate_msk = nic_cfg->tc_min_rate_msk & 353 (BIT(nic_cfg->tcs) - 1); 354 num_min_rated_tcs = hweight8(min_rate_msk); 355 } 356 357 /* First, calculate weights where min_rate is specified */ 358 if (num_min_rated_tcs) { 359 for (tc = 0; tc != nic_cfg->tcs; tc++) { 360 if (!nic_cfg->tc_min_rate[tc]) { 361 tc_weight[tc] = 0; 362 continue; 363 } 364 365 tc_weight[tc] = (-1L + link_speed + 366 nic_cfg->tc_min_rate[tc] * 367 max_weight) / 368 link_speed; 369 tc_weight[tc] = min(tc_weight[tc], max_weight); 370 sum_weight += tc_weight[tc]; 371 } 372 } 373 374 /* WSP, if min_rate is set for at least one TC. 375 * RR otherwise. 376 * 377 * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't 378 * overwrite it here in that case. 379 */ 380 if (!nic_cfg->is_ptp) 381 hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); 382 383 /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, 384 * leave Descriptor TC Arbiter as RR. 385 */ 386 hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); 387 388 hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); 389 390 for (tc = 0; tc != nic_cfg->tcs; tc++) { 391 const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U; 392 const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); 393 u32 weight, max_credit; 394 395 hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, 396 fixed_max_credit); 397 hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); 398 399 if (num_min_rated_tcs) { 400 weight = tc_weight[tc]; 401 402 if (!weight && sum_weight < max_weight) 403 weight = (max_weight - sum_weight) / 404 (nic_cfg->tcs - num_min_rated_tcs); 405 else if (!weight) 406 weight = 0x64; 407 408 max_credit = max(8 * weight, fixed_max_credit); 409 } else { 410 weight = 0x64; 411 max_credit = 0xFFF; 412 } 413 414 hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); 415 hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, 416 max_credit); 417 418 hw_atl_tps_tx_desc_rate_en_set(self, desc, en); 419 420 if (en) { 421 /* Nominal rate is always 10G */ 422 const u32 rate = 10000U * scale / 423 nic_cfg->tc_max_rate[tc]; 424 const u32 rate_int = rate >> 425 HW_ATL_TPS_DESC_RATE_Y_WIDTH; 426 const u32 rate_frac = rate & frac_msk; 427 428 hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); 429 hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); 430 } else { 431 /* A value of 1 indicates the queue is not 432 * rate controlled. 433 */ 434 hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); 435 hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); 436 } 437 } 438 for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { 439 const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); 440 441 hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); 442 hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); 443 hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); 444 } 445 446 return aq_hw_err_from_flags(self); 447} 448 449static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) 450{ 451 struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; 452 453 /* Tx TC/Queue number config */ 454 hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode); 455 456 hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); 457 hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); 458 hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); 459 460 /* Tx interrupts */ 461 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); 462 463 /* misc */ 464 aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ? 465 0x00010000U : 0x00000000U); 466 hw_atl_tdm_tx_dca_en_set(self, 0U); 467 hw_atl_tdm_tx_dca_mode_set(self, 0U); 468 469 hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); 470 471 return aq_hw_err_from_flags(self); 472} 473 474void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self) 475{ 476 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 477 u32 rss_ctrl1 = HW_ATL_RSS_DISABLED; 478 479 if (cfg->is_rss) 480 rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ? 481 HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS : 482 HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS; 483 484 hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1); 485} 486 487static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) 488{ 489 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 490 int i; 491 492 /* Rx TC/RSS number config */ 493 hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode); 494 495 /* Rx flow control */ 496 hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); 497 498 /* RSS Ring selection */ 499 hw_atl_b0_hw_init_rx_rss_ctrl1(self); 500 501 /* Multicast filters */ 502 for (i = HW_ATL_B0_MAC_MAX; i--;) { 503 hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i); 504 hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); 505 } 506 507 hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); 508 hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U); 509 510 /* Vlan filters */ 511 hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U); 512 hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U); 513 514 hw_atl_rpf_vlan_prom_mode_en_set(self, 1); 515 516 // Always accept untagged packets 517 hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); 518 hw_atl_rpf_vlan_untagged_act_set(self, 1U); 519 520 /* Rx Interrupts */ 521 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); 522 523 /* misc */ 524 aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ? 525 0x000F0000U : 0x00000000U); 526 527 hw_atl_rpfl2broadcast_flr_act_set(self, 1U); 528 hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); 529 530 hw_atl_rdm_rx_dca_en_set(self, 0U); 531 hw_atl_rdm_rx_dca_mode_set(self, 0U); 532 533 return aq_hw_err_from_flags(self); 534} 535 536int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr) 537{ 538 unsigned int h = 0U; 539 unsigned int l = 0U; 540 int err = 0; 541 542 if (!mac_addr) { 543 err = -EINVAL; 544 goto err_exit; 545 } 546 h = (mac_addr[0] << 8) | (mac_addr[1]); 547 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 548 (mac_addr[4] << 8) | mac_addr[5]; 549 550 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC); 551 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); 552 hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC); 553 hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC); 554 555 err = aq_hw_err_from_flags(self); 556 557err_exit: 558 return err; 559} 560 561static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr) 562{ 563 static u32 aq_hw_atl_igcr_table_[4][2] = { 564 [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, 565 [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U }, 566 [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, 567 [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, 568 }; 569 struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; 570 int err = 0; 571 u32 val; 572 573 574 hw_atl_b0_hw_init_tx_path(self); 575 hw_atl_b0_hw_init_rx_path(self); 576 577 hw_atl_b0_hw_mac_addr_set(self, mac_addr); 578 579 self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); 580 self->aq_fw_ops->set_state(self, MPI_INIT); 581 582 hw_atl_b0_hw_qos_set(self); 583 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 584 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 585 586 /* Force limit MRRS on RDM/TDM to 2K */ 587 val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR); 588 aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR, 589 (val & ~0x707) | 0x404); 590 591 /* TX DMA total request limit. B0 hardware is not capable to 592 * handle more than (8K-MRRS) incoming DMA data. 593 * Value 24 in 256byte units 594 */ 595 aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24); 596 597 /* Reset link status and read out initial hardware counters */ 598 self->aq_link_status.mbps = 0; 599 self->aq_fw_ops->update_stats(self); 600 601 err = aq_hw_err_from_flags(self); 602 if (err < 0) 603 goto err_exit; 604 605 /* Interrupts */ 606 hw_atl_reg_irq_glb_ctl_set(self, 607 aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type] 608 [(aq_nic_cfg->vecs > 1U) ? 609 1 : 0]); 610 611 hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); 612 613 /* Interrupts */ 614 hw_atl_reg_gen_irq_map_set(self, 615 ((HW_ATL_B0_ERR_INT << 0x18) | 616 (1U << 0x1F)) | 617 ((HW_ATL_B0_ERR_INT << 0x10) | 618 (1U << 0x17)), 0U); 619 620 /* Enable link interrupt */ 621 if (aq_nic_cfg->link_irq_vec) 622 hw_atl_reg_gen_irq_map_set(self, BIT(7) | 623 aq_nic_cfg->link_irq_vec, 3U); 624 625 hw_atl_b0_hw_offload_set(self, aq_nic_cfg); 626 627err_exit: 628 return err; 629} 630 631int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring) 632{ 633 hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx); 634 635 return aq_hw_err_from_flags(self); 636} 637 638int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring) 639{ 640 hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx); 641 642 return aq_hw_err_from_flags(self); 643} 644 645int hw_atl_b0_hw_start(struct aq_hw_s *self) 646{ 647 hw_atl_tpb_tx_buff_en_set(self, 1); 648 hw_atl_rpb_rx_buff_en_set(self, 1); 649 650 return aq_hw_err_from_flags(self); 651} 652 653static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, 654 struct aq_ring_s *ring) 655{ 656 hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); 657 658 return 0; 659} 660 661int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring, 662 unsigned int frags) 663{ 664 struct aq_ring_buff_s *buff = NULL; 665 struct hw_atl_txd_s *txd = NULL; 666 unsigned int buff_pa_len = 0U; 667 unsigned int frag_count = 0U; 668 unsigned int pkt_len = 0U; 669 bool is_vlan = false; 670 bool is_gso = false; 671 672 buff = &ring->buff_ring[ring->sw_tail]; 673 pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt; 674 675 for (frag_count = 0; frag_count < frags; frag_count++) { 676 txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail * 677 HW_ATL_B0_TXD_SIZE]; 678 txd->ctl = 0; 679 txd->ctl2 = 0; 680 txd->buf_addr = 0; 681 682 buff = &ring->buff_ring[ring->sw_tail]; 683 684 if (buff->is_gso_tcp || buff->is_gso_udp) { 685 if (buff->is_gso_tcp) 686 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP; 687 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; 688 txd->ctl |= (buff->len_l3 << 31) | 689 (buff->len_l2 << 24); 690 txd->ctl2 |= (buff->mss << 16); 691 is_gso = true; 692 693 pkt_len -= (buff->len_l4 + 694 buff->len_l3 + 695 buff->len_l2); 696 if (buff->is_ipv6) 697 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6; 698 txd->ctl2 |= (buff->len_l4 << 8) | 699 (buff->len_l3 >> 1); 700 } 701 if (buff->is_vlan) { 702 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; 703 txd->ctl |= buff->vlan_tx_tag << 4; 704 is_vlan = true; 705 } 706 if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) { 707 buff_pa_len = buff->len; 708 709 txd->buf_addr = buff->pa; 710 txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN & 711 ((u32)buff_pa_len << 4)); 712 txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD; 713 714 /* PAY_LEN */ 715 txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14); 716 717 if (is_gso || is_vlan) { 718 /* enable tx context */ 719 txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN; 720 } 721 if (is_gso) 722 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO; 723 724 /* Tx checksum offloads */ 725 if (buff->is_ip_cso) 726 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO; 727 728 if (buff->is_udp_cso || buff->is_tcp_cso) 729 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO; 730 731 if (is_vlan) 732 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN; 733 734 if (unlikely(buff->is_eop)) { 735 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; 736 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; 737 is_gso = false; 738 is_vlan = false; 739 } 740 } 741 ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail); 742 } 743 744 hw_atl_b0_hw_tx_ring_tail_update(self, ring); 745 746 return aq_hw_err_from_flags(self); 747} 748 749int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 750 struct aq_ring_param_s *aq_ring_param) 751{ 752 u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); 753 u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip; 754 u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; 755 756 hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); 757 758 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); 759 760 hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw, 761 aq_ring->idx); 762 763 hw_atl_reg_rx_dma_desc_base_addressmswset(self, 764 dma_desc_addr_msw, aq_ring->idx); 765 766 hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); 767 768 hw_atl_rdm_rx_desc_data_buff_size_set(self, 769 aq_ring->frame_max / 1024U, 770 aq_ring->idx); 771 772 hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); 773 hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); 774 hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping, 775 aq_ring->idx); 776 777 /* Rx ring set mode */ 778 779 /* Mapping interrupt vector */ 780 hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx); 781 hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx); 782 783 hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); 784 hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx); 785 hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx); 786 hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx); 787 788 return aq_hw_err_from_flags(self); 789} 790 791int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 792 struct aq_ring_param_s *aq_ring_param) 793{ 794 u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); 795 u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; 796 797 hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, 798 aq_ring->idx); 799 800 hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr, 801 aq_ring->idx); 802 803 hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); 804 805 hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring); 806 807 /* Set Tx threshold */ 808 hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx); 809 810 /* Mapping interrupt vector */ 811 hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx); 812 hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx); 813 814 hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx); 815 hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx); 816 817 return aq_hw_err_from_flags(self); 818} 819 820int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring, 821 unsigned int sw_tail_old) 822{ 823 for (; sw_tail_old != ring->sw_tail; 824 sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) { 825 struct hw_atl_rxd_s *rxd = 826 (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old * 827 HW_ATL_B0_RXD_SIZE]; 828 829 struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old]; 830 831 rxd->buf_addr = buff->pa; 832 rxd->hdr_addr = 0U; 833 } 834 835 hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx); 836 837 return aq_hw_err_from_flags(self); 838} 839 840static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self, 841 struct aq_ring_s *ring) 842{ 843 unsigned int i; 844 845 for (i = aq_ring_avail_dx(ring); i--; 846 ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) { 847 struct hw_atl_rxd_s *rxd = 848 (struct hw_atl_rxd_s *) 849 &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE]; 850 851 rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size; 852 rxd->hdr_addr = 0U; 853 } 854 /* Make sure descriptors are updated before bump tail*/ 855 wmb(); 856 857 hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); 858 859 return aq_hw_err_from_flags(self); 860} 861 862static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self, 863 struct aq_ring_s *ring) 864{ 865 while (ring->hw_head != ring->sw_tail) { 866 struct hw_atl_rxd_hwts_wb_s *hwts_wb = 867 (struct hw_atl_rxd_hwts_wb_s *) 868 (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE)); 869 870 /* RxD is not done */ 871 if (!(hwts_wb->sec_lw0 & 0x1U)) 872 break; 873 874 ring->hw_head = aq_ring_next_dx(ring, ring->hw_head); 875 } 876 877 return aq_hw_err_from_flags(self); 878} 879 880int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, 881 struct aq_ring_s *ring) 882{ 883 unsigned int hw_head_; 884 int err = 0; 885 886 hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); 887 888 if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) { 889 err = -ENXIO; 890 goto err_exit; 891 } 892 893 /* Validate that the new hw_head_ is reasonable. */ 894 if (hw_head_ >= ring->size) { 895 err = -ENXIO; 896 goto err_exit; 897 } 898 899 ring->hw_head = hw_head_; 900 err = aq_hw_err_from_flags(self); 901 902err_exit: 903 return err; 904} 905 906int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring) 907{ 908 for (; ring->hw_head != ring->sw_tail; 909 ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { 910 struct aq_ring_buff_s *buff = NULL; 911 struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *) 912 &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE]; 913 914 unsigned int is_rx_check_sum_enabled = 0U; 915 unsigned int pkt_type = 0U; 916 u8 rx_stat = 0U; 917 918 if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */ 919 break; 920 } 921 922 buff = &ring->buff_ring[ring->hw_head]; 923 924 buff->flags = 0U; 925 buff->is_hash_l4 = 0U; 926 927 rx_stat = (0x0000003CU & rxd_wb->status) >> 2; 928 929 is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U; 930 931 pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >> 932 HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT; 933 934 if (is_rx_check_sum_enabled & BIT(0) && 935 (0x0U == (pkt_type & 0x3U))) 936 buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U; 937 938 if (is_rx_check_sum_enabled & BIT(1)) { 939 if (0x4U == (pkt_type & 0x1CU)) 940 buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U : 941 !!(rx_stat & BIT(3)); 942 else if (0x0U == (pkt_type & 0x1CU)) 943 buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U : 944 !!(rx_stat & BIT(3)); 945 } 946 buff->is_cso_err = !!(rx_stat & 0x6); 947 /* Checksum offload workaround for small packets */ 948 if (unlikely(rxd_wb->pkt_len <= 60)) { 949 buff->is_ip_cso = 0U; 950 buff->is_cso_err = 0U; 951 } 952 953 if (self->aq_nic_cfg->is_vlan_rx_strip && 954 ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) || 955 (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) { 956 buff->is_vlan = 1; 957 buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan); 958 } 959 960 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { 961 /* MAC error or DMA error */ 962 buff->is_error = 1U; 963 } 964 if (self->aq_nic_cfg->is_rss) { 965 /* last 4 byte */ 966 u16 rss_type = rxd_wb->type & 0xFU; 967 968 if (rss_type && rss_type < 0x8U) { 969 buff->is_hash_l4 = (rss_type == 0x4 || 970 rss_type == 0x5); 971 buff->rss_hash = rxd_wb->rss_hash; 972 } 973 } 974 975 buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT & 976 rxd_wb->status); 977 if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { 978 buff->len = rxd_wb->pkt_len % 979 ring->frame_max; 980 buff->len = buff->len ? 981 buff->len : ring->frame_max; 982 buff->next = 0U; 983 buff->is_eop = 1U; 984 } else { 985 buff->len = 986 rxd_wb->pkt_len > ring->frame_max ? 987 ring->frame_max : rxd_wb->pkt_len; 988 989 if (buff->is_lro) { 990 /* LRO */ 991 buff->next = rxd_wb->next_desc_ptr; 992 ++ring->stats.rx.lro_packets; 993 } else { 994 /* jumbo */ 995 buff->next = 996 aq_ring_next_dx(ring, 997 ring->hw_head); 998 ++ring->stats.rx.jumbo_packets; 999 } 1000 } 1001 } 1002 1003 return aq_hw_err_from_flags(self); 1004} 1005 1006int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) 1007{ 1008 hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask)); 1009 1010 return aq_hw_err_from_flags(self); 1011} 1012 1013int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) 1014{ 1015 hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask)); 1016 hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask)); 1017 1018 atomic_inc(&self->dpc); 1019 1020 return aq_hw_err_from_flags(self); 1021} 1022 1023int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) 1024{ 1025 *mask = hw_atl_itr_irq_statuslsw_get(self); 1026 1027 return aq_hw_err_from_flags(self); 1028} 1029 1030#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U) 1031 1032int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, 1033 unsigned int packet_filter) 1034{ 1035 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 1036 unsigned int i = 0U; 1037 u32 vlan_promisc; 1038 u32 l2_promisc; 1039 1040 l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) || 1041 !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)); 1042 vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc; 1043 1044 hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc); 1045 1046 hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc); 1047 1048 hw_atl_rpfl2multicast_flr_en_set(self, 1049 IS_FILTER_ENABLED(IFF_ALLMULTI) && 1050 IS_FILTER_ENABLED(IFF_MULTICAST), 0); 1051 1052 hw_atl_rpfl2_accept_all_mc_packets_set(self, 1053 IS_FILTER_ENABLED(IFF_ALLMULTI) && 1054 IS_FILTER_ENABLED(IFF_MULTICAST)); 1055 1056 hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); 1057 1058 1059 for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) 1060 hw_atl_rpfl2_uc_flr_en_set(self, 1061 (cfg->is_mc_list_enabled && 1062 (i <= cfg->mc_list_count)) ? 1063 1U : 0U, i); 1064 1065 return aq_hw_err_from_flags(self); 1066} 1067 1068#undef IS_FILTER_ENABLED 1069 1070static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, 1071 u8 ar_mac 1072 [AQ_HW_MULTICAST_ADDRESS_MAX] 1073 [ETH_ALEN], 1074 u32 count) 1075{ 1076 int err = 0; 1077 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; 1078 1079 if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { 1080 err = -EBADRQC; 1081 goto err_exit; 1082 } 1083 for (cfg->mc_list_count = 0U; 1084 cfg->mc_list_count < count; 1085 ++cfg->mc_list_count) { 1086 u32 i = cfg->mc_list_count; 1087 u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); 1088 u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | 1089 (ar_mac[i][4] << 8) | ar_mac[i][5]; 1090 1091 hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); 1092 1093 hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, 1094 HW_ATL_B0_MAC_MIN + i); 1095 1096 hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, 1097 HW_ATL_B0_MAC_MIN + i); 1098 1099 hw_atl_rpfl2_uc_flr_en_set(self, 1100 (cfg->is_mc_list_enabled), 1101 HW_ATL_B0_MAC_MIN + i); 1102 } 1103 1104 err = aq_hw_err_from_flags(self); 1105 1106err_exit: 1107 return err; 1108} 1109 1110static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) 1111{ 1112 unsigned int i = 0U; 1113 u32 itr_tx = 2U; 1114 u32 itr_rx = 2U; 1115 1116 switch (self->aq_nic_cfg->itr) { 1117 case AQ_CFG_INTERRUPT_MODERATION_ON: 1118 case AQ_CFG_INTERRUPT_MODERATION_AUTO: 1119 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); 1120 hw_atl_tdm_tdm_intr_moder_en_set(self, 1U); 1121 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); 1122 hw_atl_rdm_rdm_intr_moder_en_set(self, 1U); 1123 1124 if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { 1125 /* HW timers are in 2us units */ 1126 int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; 1127 int tx_min_timer = tx_max_timer / 2; 1128 1129 int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; 1130 int rx_min_timer = rx_max_timer / 2; 1131 1132 tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer); 1133 tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer); 1134 rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer); 1135 rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer); 1136 1137 itr_tx |= tx_min_timer << 0x8U; 1138 itr_tx |= tx_max_timer << 0x10U; 1139 itr_rx |= rx_min_timer << 0x8U; 1140 itr_rx |= rx_max_timer << 0x10U; 1141 } else { 1142 static unsigned int hw_atl_b0_timers_table_tx_[][2] = { 1143 {0xfU, 0xffU}, /* 10Gbit */ 1144 {0xfU, 0x1ffU}, /* 5Gbit */ 1145 {0xfU, 0x1ffU}, /* 5Gbit 5GS */ 1146 {0xfU, 0x1ffU}, /* 2.5Gbit */ 1147 {0xfU, 0x1ffU}, /* 1Gbit */ 1148 {0xfU, 0x1ffU}, /* 100Mbit */ 1149 }; 1150 1151 static unsigned int hw_atl_b0_timers_table_rx_[][2] = { 1152 {0x6U, 0x38U},/* 10Gbit */ 1153 {0xCU, 0x70U},/* 5Gbit */ 1154 {0xCU, 0x70U},/* 5Gbit 5GS */ 1155 {0x18U, 0xE0U},/* 2.5Gbit */ 1156 {0x30U, 0x80U},/* 1Gbit */ 1157 {0x4U, 0x50U},/* 100Mbit */ 1158 }; 1159 1160 unsigned int speed_index = 1161 hw_atl_utils_mbps_2_speed_index( 1162 self->aq_link_status.mbps); 1163 1164 /* Update user visible ITR settings */ 1165 self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_ 1166 [speed_index][1] * 2; 1167 self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_ 1168 [speed_index][1] * 2; 1169 1170 itr_tx |= hw_atl_b0_timers_table_tx_ 1171 [speed_index][0] << 0x8U; 1172 itr_tx |= hw_atl_b0_timers_table_tx_ 1173 [speed_index][1] << 0x10U; 1174 1175 itr_rx |= hw_atl_b0_timers_table_rx_ 1176 [speed_index][0] << 0x8U; 1177 itr_rx |= hw_atl_b0_timers_table_rx_ 1178 [speed_index][1] << 0x10U; 1179 } 1180 break; 1181 case AQ_CFG_INTERRUPT_MODERATION_OFF: 1182 hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); 1183 hw_atl_tdm_tdm_intr_moder_en_set(self, 0U); 1184 hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); 1185 hw_atl_rdm_rdm_intr_moder_en_set(self, 0U); 1186 itr_tx = 0U; 1187 itr_rx = 0U; 1188 break; 1189 } 1190 1191 for (i = HW_ATL_B0_RINGS_MAX; i--;) { 1192 hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i); 1193 hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i); 1194 } 1195 1196 return aq_hw_err_from_flags(self); 1197} 1198 1199static int hw_atl_b0_hw_stop(struct aq_hw_s *self) 1200{ 1201 int err; 1202 u32 val; 1203 1204 hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); 1205 1206 /* Invalidate Descriptor Cache to prevent writing to the cached 1207 * descriptors and to the data pointer of those descriptors 1208 */ 1209 hw_atl_rdm_rx_dma_desc_cache_init_tgl(self); 1210 1211 err = aq_hw_err_from_flags(self); 1212 1213 if (err) 1214 goto err_exit; 1215 1216 readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get, 1217 self, val, val == 1, 1000U, 10000U); 1218 1219err_exit: 1220 return err; 1221} 1222 1223int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) 1224{ 1225 hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx); 1226 1227 return aq_hw_err_from_flags(self); 1228} 1229 1230int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) 1231{ 1232 hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx); 1233 1234 return aq_hw_err_from_flags(self); 1235} 1236 1237#define get_ptp_ts_val_u64(self, indx) \ 1238 ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff)) 1239 1240static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp) 1241{ 1242 u64 ns; 1243 1244 hw_atl_pcs_ptp_clock_read_enable(self, 1); 1245 hw_atl_pcs_ptp_clock_read_enable(self, 0); 1246 ns = (get_ptp_ts_val_u64(self, 0) + 1247 (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC + 1248 (get_ptp_ts_val_u64(self, 3) + 1249 (get_ptp_ts_val_u64(self, 4) << 16)); 1250 1251 *stamp = ns + self->ptp_clk_offset; 1252} 1253 1254static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns) 1255{ 1256 /* For accuracy, the digit is extended */ 1257 s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC); 1258 u64 nsi_frac = 0; 1259 u64 nsi; 1260 1261 base_ns = div64_s64(base_ns, freq); 1262 nsi = div64_u64(base_ns, NSEC_PER_SEC); 1263 1264 if (base_ns != nsi * NSEC_PER_SEC) { 1265 s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC, 1266 base_ns - nsi * NSEC_PER_SEC); 1267 nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor); 1268 } 1269 1270 *ns = (u32)nsi; 1271 *fns = (u32)nsi_frac; 1272} 1273 1274static void 1275hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq, 1276 u64 phyfreq, u64 macfreq) 1277{ 1278 s64 adj_fns_val; 1279 s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy + 1280 AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy); 1281 s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac + 1282 AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac); 1283 s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy; 1284 s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac; 1285 /* MAC MCP counter freq is macfreq / 4 */ 1286 s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) * 1287 4 * AQ_FRAC_PER_NS; 1288 1289 diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow, 1290 AQ_HW_MAC_COUNTER_HZ); 1291 adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS * 1292 ptp_adj_freq->ns_mac) + diff_in_mcp_overflow; 1293 1294 ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS); 1295 ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj * 1296 AQ_FRAC_PER_NS; 1297} 1298 1299static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta) 1300{ 1301 self->ptp_clk_offset += delta; 1302 1303 self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset); 1304 1305 return 0; 1306} 1307 1308static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts) 1309{ 1310 s64 delta = time - (self->ptp_clk_offset + ts); 1311 1312 return hw_atl_b0_adj_sys_clock(self, delta); 1313} 1314 1315static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time) 1316{ 1317 *time = self->ptp_clk_offset + ts; 1318 return 0; 1319} 1320 1321static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb) 1322{ 1323 struct hw_fw_request_iface fwreq; 1324 size_t size; 1325 1326 memset(&fwreq, 0, sizeof(fwreq)); 1327 1328 fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ; 1329 hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb, 1330 &fwreq.ptp_adj_freq.ns_mac, 1331 &fwreq.ptp_adj_freq.fns_mac); 1332 hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb, 1333 &fwreq.ptp_adj_freq.ns_phy, 1334 &fwreq.ptp_adj_freq.fns_phy); 1335 hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq, 1336 AQ_HW_PHY_COUNTER_HZ, 1337 AQ_HW_MAC_COUNTER_HZ); 1338 1339 size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq); 1340 return self->aq_fw_ops->send_fw_request(self, &fwreq, size); 1341} 1342 1343static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index, 1344 u64 start, u32 period) 1345{ 1346 struct hw_fw_request_iface fwreq; 1347 size_t size; 1348 1349 memset(&fwreq, 0, sizeof(fwreq)); 1350 1351 fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL; 1352 fwreq.ptp_gpio_ctrl.index = index; 1353 fwreq.ptp_gpio_ctrl.period = period; 1354 /* Apply time offset */ 1355 fwreq.ptp_gpio_ctrl.start = start; 1356 1357 size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl); 1358 return self->aq_fw_ops->send_fw_request(self, &fwreq, size); 1359} 1360 1361static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index, 1362 u32 enable) 1363{ 1364 /* Enable/disable Sync1588 GPIO Timestamping */ 1365 aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0); 1366 1367 return 0; 1368} 1369 1370static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts) 1371{ 1372 u64 sec_l; 1373 u64 sec_h; 1374 u64 nsec_l; 1375 u64 nsec_h; 1376 1377 if (!ts) 1378 return -1; 1379 1380 /* PTP external GPIO clock seconds count 15:0 */ 1381 sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914); 1382 /* PTP external GPIO clock seconds count 31:16 */ 1383 sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915); 1384 /* PTP external GPIO clock nanoseconds count 15:0 */ 1385 nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916); 1386 /* PTP external GPIO clock nanoseconds count 31:16 */ 1387 nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917); 1388 1389 *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC; 1390 1391 return 0; 1392} 1393 1394static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p, 1395 unsigned int len, u64 *timestamp) 1396{ 1397 unsigned int offset = 14; 1398 struct ethhdr *eth; 1399 __be64 sec; 1400 __be32 ns; 1401 u8 *ptr; 1402 1403 if (len <= offset || !timestamp) 1404 return 0; 1405 1406 /* The TIMESTAMP in the end of package has following format: 1407 * (big-endian) 1408 * struct { 1409 * uint64_t sec; 1410 * uint32_t ns; 1411 * uint16_t stream_id; 1412 * }; 1413 */ 1414 ptr = p + (len - offset); 1415 memcpy(&sec, ptr, sizeof(sec)); 1416 ptr += sizeof(sec); 1417 memcpy(&ns, ptr, sizeof(ns)); 1418 1419 *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC + 1420 be32_to_cpu(ns) + self->ptp_clk_offset; 1421 1422 eth = (struct ethhdr *)p; 1423 1424 return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14; 1425} 1426 1427static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len, 1428 u64 *timestamp) 1429{ 1430 struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p; 1431 u64 tmp, sec, ns; 1432 1433 sec = 0; 1434 tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff; 1435 sec += tmp; 1436 tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10; 1437 sec += tmp; 1438 tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26; 1439 sec += tmp; 1440 tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38; 1441 sec += tmp; 1442 ns = sec * NSEC_PER_SEC + hwts_wb->ns; 1443 if (timestamp) 1444 *timestamp = ns + self->ptp_clk_offset; 1445 return 0; 1446} 1447 1448static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self, 1449 struct aq_rx_filter_l3l4 *data) 1450{ 1451 u8 location = data->location; 1452 1453 if (!data->is_ipv6) { 1454 hw_atl_rpfl3l4_cmd_clear(self, location); 1455 hw_atl_rpf_l4_spd_set(self, 0U, location); 1456 hw_atl_rpf_l4_dpd_set(self, 0U, location); 1457 hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location); 1458 hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location); 1459 } else { 1460 int i; 1461 1462 for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) { 1463 hw_atl_rpfl3l4_cmd_clear(self, location + i); 1464 hw_atl_rpf_l4_spd_set(self, 0U, location + i); 1465 hw_atl_rpf_l4_dpd_set(self, 0U, location + i); 1466 } 1467 hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location); 1468 hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location); 1469 } 1470 1471 return aq_hw_err_from_flags(self); 1472} 1473 1474static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self, 1475 struct aq_rx_filter_l3l4 *data) 1476{ 1477 u8 location = data->location; 1478 1479 hw_atl_b0_hw_fl3l4_clear(self, data); 1480 1481 if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 | 1482 HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) { 1483 if (!data->is_ipv6) { 1484 hw_atl_rpfl3l4_ipv4_dest_addr_set(self, 1485 location, 1486 data->ip_dst[0]); 1487 hw_atl_rpfl3l4_ipv4_src_addr_set(self, 1488 location, 1489 data->ip_src[0]); 1490 } else { 1491 hw_atl_rpfl3l4_ipv6_dest_addr_set(self, 1492 location, 1493 data->ip_dst); 1494 hw_atl_rpfl3l4_ipv6_src_addr_set(self, 1495 location, 1496 data->ip_src); 1497 } 1498 } 1499 1500 if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 | 1501 HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) { 1502 hw_atl_rpf_l4_dpd_set(self, data->p_dst, location); 1503 hw_atl_rpf_l4_spd_set(self, data->p_src, location); 1504 } 1505 1506 hw_atl_rpfl3l4_cmd_set(self, location, data->cmd); 1507 1508 return aq_hw_err_from_flags(self); 1509} 1510 1511static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self, 1512 struct aq_rx_filter_l2 *data) 1513{ 1514 hw_atl_rpf_etht_flr_en_set(self, 1U, data->location); 1515 hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location); 1516 hw_atl_rpf_etht_user_priority_en_set(self, 1517 !!data->user_priority_en, 1518 data->location); 1519 if (data->user_priority_en) 1520 hw_atl_rpf_etht_user_priority_set(self, 1521 data->user_priority, 1522 data->location); 1523 1524 if (data->queue < 0) { 1525 hw_atl_rpf_etht_flr_act_set(self, 0U, data->location); 1526 hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location); 1527 } else { 1528 hw_atl_rpf_etht_flr_act_set(self, 1U, data->location); 1529 hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location); 1530 hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location); 1531 } 1532 1533 return aq_hw_err_from_flags(self); 1534} 1535 1536static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self, 1537 struct aq_rx_filter_l2 *data) 1538{ 1539 hw_atl_rpf_etht_flr_en_set(self, 0U, data->location); 1540 hw_atl_rpf_etht_flr_set(self, 0U, data->location); 1541 hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location); 1542 1543 return aq_hw_err_from_flags(self); 1544} 1545 1546/* 1547 * @brief Set VLAN filter table 1548 * @details Configure VLAN filter table to accept (and assign the queue) traffic 1549 * for the particular vlan ids. 1550 * Note: use this function under vlan promisc mode not to lost the traffic 1551 * 1552 * @param aq_hw_s 1553 * @param aq_rx_filter_vlan VLAN filter configuration 1554 * @return 0 - OK, <0 - error 1555 */ 1556static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, 1557 struct aq_rx_filter_vlan *aq_vlans) 1558{ 1559 int i; 1560 1561 for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) { 1562 hw_atl_rpf_vlan_flr_en_set(self, 0U, i); 1563 hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i); 1564 if (aq_vlans[i].enable) { 1565 hw_atl_rpf_vlan_id_flr_set(self, 1566 aq_vlans[i].vlan_id, 1567 i); 1568 hw_atl_rpf_vlan_flr_act_set(self, 1U, i); 1569 hw_atl_rpf_vlan_flr_en_set(self, 1U, i); 1570 if (aq_vlans[i].queue != 0xFF) { 1571 hw_atl_rpf_vlan_rxq_flr_set(self, 1572 aq_vlans[i].queue, 1573 i); 1574 hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i); 1575 } 1576 } 1577 } 1578 1579 return aq_hw_err_from_flags(self); 1580} 1581 1582static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) 1583{ 1584 /* set promisc in case of disabing the vland filter */ 1585 hw_atl_rpf_vlan_prom_mode_en_set(self, !enable); 1586 1587 return aq_hw_err_from_flags(self); 1588} 1589 1590int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable) 1591{ 1592 switch (mode) { 1593 case AQ_HW_LOOPBACK_DMA_SYS: 1594 hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable); 1595 hw_atl_rpb_dma_sys_lbk_set(self, enable); 1596 break; 1597 case AQ_HW_LOOPBACK_PKT_SYS: 1598 hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable); 1599 hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable); 1600 break; 1601 case AQ_HW_LOOPBACK_DMA_NET: 1602 hw_atl_rpf_vlan_prom_mode_en_set(self, enable); 1603 hw_atl_rpfl2promiscuous_mode_en_set(self, enable); 1604 hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable); 1605 hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable); 1606 hw_atl_rpb_dma_net_lbk_set(self, enable); 1607 break; 1608 default: 1609 return -EINVAL; 1610 } 1611 1612 return 0; 1613} 1614 1615static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self) 1616{ 1617 if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self)) 1618 return 1; 1619 1620 return 0; 1621} 1622 1623static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp) 1624{ 1625 bool ts_disabled; 1626 int err; 1627 u32 val; 1628 u32 ts; 1629 1630 ts_disabled = (hw_atl_ts_power_down_get(self) == 1U); 1631 1632 if (ts_disabled) { 1633 // Set AFE Temperature Sensor to on (off by default) 1634 hw_atl_ts_power_down_set(self, 0U); 1635 1636 // Reset internal capacitors, biasing, and counters 1637 hw_atl_ts_reset_set(self, 1); 1638 hw_atl_ts_reset_set(self, 0); 1639 } 1640 1641 err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self, 1642 val, val == 1, 10000U, 500000U); 1643 if (err) 1644 return err; 1645 1646 ts = hw_atl_ts_data_get(self); 1647 *temp = ts * ts * 16 / 100000 + 60 * ts - 83410; 1648 1649 if (ts_disabled) { 1650 // Set AFE Temperature Sensor back to off 1651 hw_atl_ts_power_down_set(self, 1U); 1652 } 1653 1654 return 0; 1655} 1656 1657const struct aq_hw_ops hw_atl_ops_b0 = { 1658 .hw_soft_reset = hw_atl_utils_soft_reset, 1659 .hw_prepare = hw_atl_utils_initfw, 1660 .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, 1661 .hw_init = hw_atl_b0_hw_init, 1662 .hw_reset = hw_atl_b0_hw_reset, 1663 .hw_start = hw_atl_b0_hw_start, 1664 .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, 1665 .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, 1666 .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, 1667 .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, 1668 .hw_stop = hw_atl_b0_hw_stop, 1669 1670 .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, 1671 .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, 1672 1673 .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, 1674 .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, 1675 1676 .hw_irq_enable = hw_atl_b0_hw_irq_enable, 1677 .hw_irq_disable = hw_atl_b0_hw_irq_disable, 1678 .hw_irq_read = hw_atl_b0_hw_irq_read, 1679 1680 .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, 1681 .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, 1682 .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, 1683 .hw_filter_l2_set = hw_atl_b0_hw_fl2_set, 1684 .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear, 1685 .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set, 1686 .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set, 1687 .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl, 1688 .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, 1689 .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, 1690 .hw_rss_set = hw_atl_b0_hw_rss_set, 1691 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, 1692 .hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit, 1693 .hw_get_regs = hw_atl_utils_hw_get_regs, 1694 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 1695 .hw_get_fw_version = hw_atl_utils_get_fw_version, 1696 1697 .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill, 1698 .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive, 1699 1700 .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts, 1701 .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock, 1702 .hw_set_sys_clock = hw_atl_b0_set_sys_clock, 1703 .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock, 1704 .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq, 1705 .hw_gpio_pulse = hw_atl_b0_gpio_pulse, 1706 .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable, 1707 .hw_get_sync_ts = hw_atl_b0_get_sync_ts, 1708 .rx_extract_ts = hw_atl_b0_rx_extract_ts, 1709 .extract_hwts = hw_atl_b0_extract_hwts, 1710 .hw_set_offload = hw_atl_b0_hw_offload_set, 1711 .hw_set_loopback = hw_atl_b0_set_loopback, 1712 .hw_set_fc = hw_atl_b0_set_fc, 1713 1714 .hw_get_mac_temp = hw_atl_b0_get_mac_temp, 1715};