// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
	u8 hwfn_id;
};

struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);
			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
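/*
 * Illustrative usage sketch (not part of the driver; compiled out): the
 * canonical pattern for ad-hoc register access is acquire -> access ->
 * release. qed_ptt_acquire() may sleep for up to roughly a second while
 * the pool is exhausted, so callers must be in process context and must
 * handle a NULL return. The guard macro and function name below are
 * hypothetical.
 */
#ifdef QED_HW_USAGE_SKETCH
static int qed_hw_sketch_read_reg(struct qed_hwfn *p_hwfn,
				  u32 hw_addr, u32 *p_val)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt)
		return -EBUSY;	/* pool stayed empty for the whole timeout */

	*p_val = qed_rd(p_hwfn, p_ptt, hw_addr);
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
#endif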
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
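/*
 * Worked example (illustrative): accessing GRC address hw_addr through
 * PTT idx first re-programs the window base to hw_addr (stored
 * dword-based, i.e. hw_addr >> 2), then touches the BAR at
 *
 *	bar_addr = PXP_EXTERNAL_BAR_PF_WINDOW_START +
 *		   idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE +
 *		   (hw_addr - window_base);
 *
 * A later access that still falls inside [window_base, window_base +
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) reuses the window without the
 * re-program, which is why qed_set_ptt() checks the bounds first.
 */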
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 port_id, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
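/*
 * Illustrative sketch (compiled out): accessing a register on behalf of
 * a VF via the pretend mechanism. The guard macro and helper name are
 * hypothetical; the driver's real VF flows live in qed_sriov.c. Note
 * the mandatory pretend back to the PF's own FID before the PTT is
 * reused for normal accesses.
 */
#ifdef QED_HW_USAGE_SKETCH
static void qed_hw_sketch_wr_as_vf(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 vfid, u32 hw_addr, u32 val)
{
	u32 concrete_fid = qed_vfid_to_concrete(p_hwfn, vfid);

	qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
	qed_wr(p_hwfn, p_ptt, hw_addr, val);
	/* restore the PF's own FID */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
#endif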
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
	((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))

static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u8 src_pfid, dst_pfid, port_id;
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	SET_FIELD(opcode, DMAE_CMD_SRC,
		  (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
	src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
		   p_params->src_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	SET_FIELD(opcode, DMAE_CMD_DST,
		  (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
	dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
		   p_params->dst_pfid : p_hwfn->rel_pf_id;
	SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
		SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);

	/* swapping mode 3 - big endian */
	SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);

	port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
		  p_params->port_id : p_hwfn->port_id;
	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);

	/* reset source address in next go */
	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);

	/* reset dest address in next go */
	SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
	}
	if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
		SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
	} else {
		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
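/*
 * Worked example (illustrative): the 'go' registers are dword-wide and
 * consecutive, so channel idx is kicked via DMAE_REG_GO_C0 + idx * 4;
 * e.g. channel 3 uses DMAE_REG_GO_C0 + 12, which is expected to line up
 * with the per-channel GO register defines in qed_reg_addr.h, assuming
 * they are laid out as consecutive dwords.
 */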
static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;
err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}
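/*
 * Worked example (illustrative): with DMAE_CMD_SIZE dwords per channel,
 * channel idx_cmd's command slot in internal memory starts at
 *
 *	DMAE_REG_CMD_MEM + idx_cmd * DMAE_CMD_SIZE * sizeof(u32)
 *
 * Only the first DMAE_CMD_SIZE_TO_FILL dwords carry the command itself;
 * the loop above writes zero to the remaining dwords of the slot so no
 * stale contents from a previous post are left behind.
 */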
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_execute_sub_operation: Wait Failed. source_addr 0x%llx, dest_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}
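/*
 * Data-flow summary (illustrative): for a HOST_VIRT source the engine
 * never sees the caller's virtual address; the dwords are first copied
 * into the coherent intermediate buffer and the DMAE command carries
 * that buffer's physical address. For a HOST_VIRT destination the
 * transfer lands in the same buffer and is copied out only after the
 * completion word is observed. This is what bounds HOST_VIRT transfers
 * to DMAE_MAX_RW_SIZE dwords per sub-operation.
 */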
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   src_addr, src_type, dst_addr, dst_type,
			   size_in_dwords);

		/* Let the flow complete w/o any error handling */
		return 0;
	}

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Split the transfer into chunks of at most DMAE_MAX_RW_SIZE dwords */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
					  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
					  qed_status, src_addr,
					  dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords,
		      struct qed_dmae_params *p_params)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}
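/*
 * Illustrative sketch (compiled out): pushing a host-resident table into
 * GRC. qed_dmae_host2grc() takes the source as a kernel virtual address
 * (QED_DMAE_ADDRESS_HOST_VIRT), so ordinary kmalloc'ed memory works, and
 * a NULL p_params selects the defaults (own PF, own port). The guard
 * macro, function and parameter names are hypothetical.
 */
#ifdef QED_HW_USAGE_SKETCH
static int qed_hw_sketch_load_table(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 grc_dest, const u32 *table,
				    u32 num_dwords)
{
	return qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)table,
				 grc_dest, num_dwords, NULL);
}
#endif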
void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_hw_err_type err_type, const char *fmt, ...)
{
	char buf[QED_HW_ERR_MAX_STR_SIZE];
	va_list vl;
	int len;

	if (fmt) {
		va_start(vl, fmt);
		len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
		va_end(vl);

		if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
			len = QED_HW_ERR_MAX_STR_SIZE - 1;

		DP_NOTICE(p_hwfn, "%s", buf);
	}

	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->cdev->recov_in_prog &&
	    err_type != QED_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	qed_hw_error_occurred(p_hwfn, err_type);

	if (fmt)
		qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
}

int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, const char *phase)
{
	u32 size = PAGE_SIZE / 2, val;
	int rc = 0;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    2 * size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return -ENOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	memset((u8 *)p_virt + size, 0, size);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
		   phase,
		   (u64)p_phys,
		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);

	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				size / 4, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn,
				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
				  p_tmp, *p_tmp, val);
			rc = -EINVAL;
			goto out;
		}
	}

out:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
	return rc;
}
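/*
 * Usage note (illustrative): qed_dmae_sanity() round-trips half a page
 * through the engine (host -> host copy of an address-stamped pattern)
 * and is suitable as a quick self-test once DMAE is up; the 'phase'
 * string only tags the log messages. A hypothetical call site:
 *
 *	if (qed_dmae_sanity(p_hwfn, p_ptt, "after_hw_init"))
 *		... treat the DMAE engine as unusable ...
 */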