pci.c (11712B)
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>

#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 100)

#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0 ; i < 3 ; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}

/**
 * hl_pci_bars_unmap() - Unmap PCI BARS.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
 */
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int i, bar;

	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);
}
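
/*
 * ELBI is an indirect path to device-internal registers through dedicated
 * PCI config-space registers: the target address is written to
 * mmPCI_CONFIG_ELBI_ADDR, the access is triggered via mmPCI_CONFIG_ELBI_CTRL,
 * completion is polled in mmPCI_CONFIG_ELBI_STS, and read data is returned
 * in mmPCI_CONFIG_ELBI_DATA.
 */

/**
 * hl_pci_elbi_read() - Read through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to read from.
 * @data: Pointer to store the read value at.
 *
 * Return: 0 on success, negative value for failure.
 */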
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);

	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);

		return 0;
	}

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error reading from ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI read didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_elbi_write() - Write through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR)
		return -EIO;

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_iatu_write() - iatu write routine.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);

	rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}

/**
 * hl_pci_reset_link_through_bridge() - Reset PCI link.
 * @hdev: Pointer to hl_device structure.
 */
static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}
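
/*
 * Note on the iATU offsets used below (an assumption based on the register
 * layout these helpers program; it matches the Synopsys DesignWare PCIe
 * "unrolled" iATU map): each region occupies a 0x200-byte block in the DBI
 * space, with the outbound registers at offset 0x0 of the block and the
 * inbound registers at offset 0x100. Within a block: 0x0 is region control 1,
 * 0x4 is region control 2 (enable / match mode), 0x8 and 0xC are the lower
 * and upper base address, 0x10 is the limit, 0x14 and 0x18 are the lower
 * and upper target address, and 0x20 is the upper limit.
 */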

/**
 * hl_pci_set_inbound_region() - Configure inbound region
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* region offset */
	offset = (0x200 * region) + 0x100;

	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}

	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
			pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
				pci_region->bar);

	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}
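
/*
 * Illustrative call sequence (a minimal sketch, not taken from this file;
 * the actual programming is done by each ASIC's init_iatu callback and the
 * values below are hypothetical):
 *
 *	struct hl_inbound_pci_region inbound_region = {
 *		.mode = PCI_BAR_MATCH_MODE,
 *		.bar = 0,
 *		.addr = 0x7ffc000000ull,
 *	};
 *
 *	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
 */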

/**
 * hl_pci_set_outbound_region() - Configure outbound region 0
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 outbound_region_end_address;
	int rc = 0;

	/* Outbound Region 0 */
	outbound_region_end_address =
			pci_region->addr + pci_region->size - 1;
	rc |= hl_pci_iatu_write(hdev, 0x008,
				lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x00C,
				upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x010,
				lower_32_bits(outbound_region_end_address));
	rc |= hl_pci_iatu_write(hdev, 0x014, 0);

	rc |= hl_pci_iatu_write(hdev, 0x018, 0);

	rc |= hl_pci_iatu_write(hdev, 0x020,
				upper_32_bits(outbound_region_end_address));
	/* Increase region size */
	rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	return rc;
}

/**
 * hl_get_pci_memory_region() - get PCI region for given address
 * @hdev: Pointer to hl_device structure.
 * @addr: device address
 *
 * Return: region index on success, otherwise PCI_REGION_NUMBER (invalid
 * region index)
 */
enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
{
	int i;

	for (i = 0 ; i < PCI_REGION_NUMBER ; i++) {
		struct pci_mem_region *region = &hdev->pci_mem_region[i];

		if (!region->used)
			continue;

		if ((addr >= region->region_base) &&
			(addr < region->region_base + region->region_size))
			return i;
	}

	return PCI_REGION_NUMBER;
}

/**
 * hl_pci_init() - PCI initialization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	if (hdev->reset_pcilink)
		hl_pci_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = hdev->asic_funcs->pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to map PCI BAR addresses\n");
		goto disable_device;
	}

	rc = hdev->asic_funcs->init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "PCI controller was not initialized successfully\n");
		goto unmap_pci_bars;
	}

	/* Driver must sleep in order for FW to finish the iATU configuration */
	if (hdev->asic_prop.iatu_done_by_fw)
		usleep_range(2000, 3000);

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set dma mask to %d bits, error %d\n",
			prop->dma_mask, rc);
		goto unmap_pci_bars;
	}

	dma_set_max_seg_size(&pdev->dev, U32_MAX);

	return 0;

unmap_pci_bars:
	hl_pci_bars_unmap(hdev);
disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure
 *
 * Unmap PCI bars and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	hl_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);
}