fsl_pamu.c (26182B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__

#include "fsl_pamu.h"

#include <linux/fsl/guts.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include <asm/mpc85xx.h>

/* define indexes for each operation mapping scenario */
#define OMI_QMAN        0x00
#define OMI_FMAN        0x01
#define OMI_QMAN_PRIV   0x02
#define OMI_CAAM        0x03

#define make64(high, low) (((u64)(high) << 32) | (low))

struct pamu_isr_data {
        void __iomem *pamu_reg_base;    /* Base address of PAMU regs */
        unsigned int count;             /* The number of PAMUs */
};

static struct paace *ppaact;
static struct paace *spaact;

static bool probed;                     /* Has PAMU been probed? */

/*
 * Table for matching compatible strings, for device tree
 * guts node, for QorIQ SOCs.
 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
 * SOCs. For the older SOCs the "fsl,qoriq-device-config-1.0"
 * string is used.
 */
static const struct of_device_id guts_device_ids[] = {
        { .compatible = "fsl,qoriq-device-config-1.0", },
        { .compatible = "fsl,qoriq-device-config-2.0", },
        {}
};

/*
 * Table for matching compatible strings, for device tree
 * L3 cache controller node.
 * "fsl,t4240-l3-cache-controller" corresponds to T4,
 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
 * "fsl,p4080-l3-cache-controller" corresponds to other
 * SOCs.
 */
static const struct of_device_id l3_device_ids[] = {
        { .compatible = "fsl,t4240-l3-cache-controller", },
        { .compatible = "fsl,b4860-l3-cache-controller", },
        { .compatible = "fsl,p4080-l3-cache-controller", },
        {}
};

/* maximum subwindows permitted per liodn */
static u32 max_subwindow_count;
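/*
 * Illustrative sketch, not part of the upstream driver: make64() pairs
 * the high and low halves of a value the hardware exposes as two 32-bit
 * big-endian registers (e.g. the AVAH/AVAL and POEAH/POEAL pairs read
 * in the ISR below). The cast on "high" happens before the shift so no
 * bits are lost; callers pass u32 register reads for "low" so no sign
 * extension occurs. The register values here are made up.
 */
static u64 __maybe_unused make64_example(void)
{
        u32 high = 0x00000001;  /* hypothetical AVAH contents */
        u32 low  = 0x80000000;  /* hypothetical AVAL contents */

        return make64(high, low);       /* yields 0x0000000180000000ULL */
}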
/**
 * pamu_get_ppaace() - Return the primary PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns the ppaace pointer upon success, else returns NULL.
 */
static struct paace *pamu_get_ppaace(int liodn)
{
        if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
                pr_debug("PPAACT doesn't exist\n");
                return NULL;
        }

        return &ppaact[liodn];
}

/**
 * pamu_enable_liodn() - Set valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success, else an error code < 0.
 */
int pamu_enable_liodn(int liodn)
{
        struct paace *ppaace;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace) {
                pr_debug("Invalid primary paace entry\n");
                return -ENOENT;
        }

        if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
                pr_debug("liodn %d not configured\n", liodn);
                return -EINVAL;
        }

        /* Ensure that all other stores to the ppaace complete first */
        mb();

        set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
        mb();

        return 0;
}

/**
 * pamu_disable_liodn() - Clears valid bit of PAACE
 * @liodn: liodn PAACT index for desired PAACE
 *
 * Returns 0 upon success, else an error code < 0.
 */
int pamu_disable_liodn(int liodn)
{
        struct paace *ppaace;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace) {
                pr_debug("Invalid primary paace entry\n");
                return -ENOENT;
        }

        set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
        mb();

        return 0;
}

/* Derive the window size encoding for a particular PAACE entry */
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
        /* Bug if not a power of 2 */
        BUG_ON(addrspace_size & (addrspace_size - 1));

        /* window size is 2^(WSE+1) bytes */
        return fls64(addrspace_size) - 2;
}
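/*
 * Worked example, illustrative and not part of the upstream driver: the
 * hardware decodes a window size of 2^(WSE+1) bytes. For a 4 KiB (2^12
 * byte) window, fls64(0x1000) is 13, so map_addrspace_size_to_wse()
 * returns WSE = 11 and 2^(11+1) = 4 KiB. Likewise the 2^36-byte window
 * used below encodes as WSE = 35.
 */
static void __maybe_unused wse_encoding_example(void)
{
        unsigned int wse = map_addrspace_size_to_wse(1ULL << 12);

        pr_debug("WSE for a 4K window: %u\n", wse);     /* prints 11 */
}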
/*
 * Set the PAACE type as primary and set the coherency required domain
 * attribute
 */
static void pamu_init_ppaace(struct paace *ppaace)
{
        set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);

        set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
               PAACE_M_COHERENCE_REQ);
}

/*
 * Function used for updating stash destination for the corresponding
 * LIODN.
 */
int pamu_update_paace_stash(int liodn, u32 value)
{
        struct paace *paace;

        paace = pamu_get_ppaace(liodn);
        if (!paace) {
                pr_debug("Invalid liodn entry\n");
                return -ENOENT;
        }
        set_bf(paace->impl_attr, PAACE_IA_CID, value);

        mb();

        return 0;
}

/**
 * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
 *
 * @liodn: Logical IO device number
 * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
 * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
 *           stashid not defined
 * @prot: window permissions
 *
 * Returns 0 upon success, else an error code < 0.
 */
int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot)
{
        struct paace *ppaace;

        ppaace = pamu_get_ppaace(liodn);
        if (!ppaace)
                return -ENOENT;

        /* window size is 2^(WSE+1) bytes */
        set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
               map_addrspace_size_to_wse(1ULL << 36));

        pamu_init_ppaace(ppaace);

        ppaace->wbah = 0;
        set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);

        /* set up operation mapping if it's configured */
        if (omi < OME_NUMBER_ENTRIES) {
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = omi;
        } else if (~omi != 0) {
                pr_debug("bad operation mapping index: %d\n", omi);
                return -EINVAL;
        }

        /* configure stash id */
        if (~stashid != 0)
                set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);

        set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
        ppaace->twbah = 0;
        set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, 0);
        set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
        set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
        set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
        mb();

        return 0;
}

/**
 * get_ome_index() - Returns the index in the operation mapping table
 *                   for device.
 * @omi_index: pointer for storing the index value
 * @dev: device whose OF node is matched against the known portals
 */
void get_ome_index(u32 *omi_index, struct device *dev)
{
        if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
                *omi_index = OMI_QMAN;
        if (of_device_is_compatible(dev->of_node, "fsl,qman"))
                *omi_index = OMI_QMAN_PRIV;
}
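/*
 * Usage sketch, illustrative and not part of the upstream driver: a
 * caller with no operation mapping and no stash target passes ~(u32)0
 * for both, which the ~omi and ~stashid checks above treat as "not
 * defined". The liodn value here is made up.
 */
static int __maybe_unused config_ppaace_example(void)
{
        int liodn = 42;         /* hypothetical logical IO device number */

        return pamu_config_ppaace(liodn, ~(u32)0, ~(u32)0,
                                  PAACE_AP_PERMS_ALL);
}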
/**
 * get_stash_id - Returns stash destination id corresponding to a
 *                cache type and vcpu.
 * @stash_dest_hint: L1, L2 or L3
 * @vcpu: vcpu target for a particular cache type.
 *
 * Returns stash id on success or ~(u32)0 on failure.
 */
u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
{
        const u32 *prop;
        struct device_node *node;
        u32 cache_level;
        int len, found = 0;
        int i;

        /* Fastpath, exit early if L3/CPC cache is target for stashing */
        if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
                node = of_find_matching_node(NULL, l3_device_ids);
                if (node) {
                        prop = of_get_property(node, "cache-stash-id", NULL);
                        if (!prop) {
                                pr_debug("missing cache-stash-id at %pOF\n",
                                         node);
                                of_node_put(node);
                                return ~(u32)0;
                        }
                        of_node_put(node);
                        return be32_to_cpup(prop);
                }
                return ~(u32)0;
        }

        for_each_of_cpu_node(node) {
                prop = of_get_property(node, "reg", &len);
                for (i = 0; i < len / sizeof(u32); i++) {
                        if (be32_to_cpup(&prop[i]) == vcpu) {
                                found = 1;
                                goto found_cpu_node;
                        }
                }
        }
found_cpu_node:

        /* find the hwnode that represents the cache */
        for (cache_level = PAMU_ATTR_CACHE_L1;
             (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
                if (stash_dest_hint == cache_level) {
                        prop = of_get_property(node, "cache-stash-id", NULL);
                        if (!prop) {
                                pr_debug("missing cache-stash-id at %pOF\n",
                                         node);
                                of_node_put(node);
                                return ~(u32)0;
                        }
                        of_node_put(node);
                        return be32_to_cpup(prop);
                }

                prop = of_get_property(node, "next-level-cache", NULL);
                if (!prop) {
                        pr_debug("can't find next-level-cache at %pOF\n", node);
                        of_node_put(node);
                        return ~(u32)0; /* can't traverse any further */
                }
                of_node_put(node);

                /* advance to next node in cache hierarchy */
                node = of_find_node_by_phandle(*prop);
                if (!node) {
                        pr_debug("Invalid node for cache hierarchy\n");
                        return ~(u32)0;
                }
        }

        pr_debug("stash dest not found for %d on vcpu %d\n",
                 stash_dest_hint, vcpu);
        return ~(u32)0;
}

/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3

/*
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
        switch (paace_type) {
        case QMAN_PAACE:
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
                /* setup QMAN Private data stashing for the L3 cache */
                set_bf(ppaace->impl_attr, PAACE_IA_CID,
                       get_stash_id(PAMU_ATTR_CACHE_L3, 0));
                set_bf(ppaace->domain_attr.to_host.coherency_required,
                       PAACE_DA_HOST_CR, 0);
                break;
        case QMAN_PORTAL_PAACE:
                set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
                ppaace->op_encode.index_ot.omi = OMI_QMAN;
                /* Set DQRR and Frame stashing for the L3 cache */
                set_bf(ppaace->impl_attr, PAACE_IA_CID,
                       get_stash_id(PAMU_ATTR_CACHE_L3, 0));
                break;
        case BMAN_PAACE:
                set_bf(ppaace->domain_attr.to_host.coherency_required,
                       PAACE_DA_HOST_CR, 0);
                break;
        }
}
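/*
 * Usage sketch, illustrative and not part of the upstream driver:
 * resolve the stash destination for vcpu 0's L1 cache and program it
 * into a liodn. ~(u32)0 signals that no stash id was found; the liodn
 * value is made up.
 */
static void __maybe_unused stash_id_example(void)
{
        u32 stashid = get_stash_id(PAMU_ATTR_CACHE_L1, 0);

        if (~stashid != 0)
                pamu_update_paace_stash(42, stashid);   /* liodn 42 is hypothetical */
}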
/*
 * Setup the operation mapping table for various devices. This is a static
 * table where each table index corresponds to a particular device. PAMU uses
 * this table to translate device transaction to appropriate corenet
 * transaction.
 */
static void setup_omt(struct ome *omt)
{
        struct ome *ome;

        /* Configure OMI_QMAN */
        ome = &omt[OMI_QMAN];

        ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
        ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
        ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;

        ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
        ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;

        /* Configure OMI_FMAN */
        ome = &omt[OMI_FMAN];
        ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;

        /* Configure OMI_QMAN private */
        ome = &omt[OMI_QMAN_PRIV];
        ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
        ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
        ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;

        /* Configure OMI_CAAM */
        ome = &omt[OMI_CAAM];
        ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
        ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
}

/*
 * Get the maximum number of PAACT table entries
 * and subwindows supported by PAMU
 */
static void get_pamu_cap_values(unsigned long pamu_reg_base)
{
        u32 pc_val;

        pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
        /* Maximum number of subwindows per liodn */
        max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
}
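/*
 * Worked example, illustrative and not part of the upstream driver: if
 * the MWCE field extracted from PAMU_PC3 is 4, the formula above gives
 * 1 << (1 + 4) = 32 subwindows per liodn. The field value here is
 * assumed for demonstration only.
 */
static u32 __maybe_unused mwce_decode_example(void)
{
        u32 mwce = 4;   /* hypothetical MWCE field value from PAMU_PC3 */

        return 1 << (1 + mwce); /* 32 subwindows per liodn */
}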
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
                          phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
                          phys_addr_t omt_phys)
{
        u32 *pc;
        struct pamu_mmap_regs *pamu_regs;

        pc = (u32 *) (pamu_reg_base + PAMU_PC);
        pamu_regs = (struct pamu_mmap_regs *)
                (pamu_reg_base + PAMU_MMAP_REGS_BASE);

        /* set up pointers to corenet control blocks */

        out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
        out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
        ppaact_phys = ppaact_phys + PAACT_SIZE;
        out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
        out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

        out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
        out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
        spaact_phys = spaact_phys + SPAACT_SIZE;
        out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
        out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

        out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
        out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
        omt_phys = omt_phys + OMT_SIZE;
        out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
        out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

        /*
         * set PAMU enable bit,
         * allow ppaact & omt to be cached
         * & enable PAMU access violation interrupts.
         */

        out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
                 PAMU_ACCESS_VIOLATION_ENABLE);
        out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
        return 0;
}

/* Enable all device LIODNS */
static void setup_liodns(void)
{
        int i, len;
        struct paace *ppaace;
        struct device_node *node = NULL;
        const u32 *prop;

        for_each_node_with_property(node, "fsl,liodn") {
                prop = of_get_property(node, "fsl,liodn", &len);
                for (i = 0; i < len / sizeof(u32); i++) {
                        int liodn;

                        liodn = be32_to_cpup(&prop[i]);
                        if (liodn >= PAACE_NUMBER_ENTRIES) {
                                pr_debug("Invalid LIODN value %d\n", liodn);
                                continue;
                        }
                        ppaace = pamu_get_ppaace(liodn);
                        pamu_init_ppaace(ppaace);
                        /* window size is 2^(WSE+1) bytes */
                        set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
                        ppaace->wbah = 0;
                        set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
                        set_bf(ppaace->impl_attr, PAACE_IA_ATM,
                               PAACE_ATM_NO_XLATE);
                        set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
                               PAACE_AP_PERMS_ALL);
                        if (of_device_is_compatible(node, "fsl,qman-portal"))
                                setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
                        if (of_device_is_compatible(node, "fsl,qman"))
                                setup_qbman_paace(ppaace, QMAN_PAACE);
                        if (of_device_is_compatible(node, "fsl,bman"))
                                setup_qbman_paace(ppaace, BMAN_PAACE);
                        mb();
                        pamu_enable_liodn(liodn);
                }
        }
}
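/*
 * Illustrative device-tree fragment, assumed rather than taken from
 * this file: setup_liodns() walks every node carrying an "fsl,liodn"
 * property, so a node like the one below would get one PAACE entry
 * per cell (here, liodns 0x40 and 0x41). Node name and values are
 * hypothetical.
 *
 *      fman0: fman@400000 {
 *              compatible = "fsl,fman";
 *              fsl,liodn = <0x40 0x41>;
 *      };
 */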
static irqreturn_t pamu_av_isr(int irq, void *arg)
{
        struct pamu_isr_data *data = arg;
        phys_addr_t phys;
        unsigned int i, j, ret;

        pr_emerg("access violation interrupt\n");

        for (i = 0; i < data->count; i++) {
                void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
                u32 pics = in_be32(p + PAMU_PICS);

                if (pics & PAMU_ACCESS_VIOLATION_STAT) {
                        u32 avs1 = in_be32(p + PAMU_AVS1);
                        struct paace *paace;

                        pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
                        pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
                        pr_emerg("AVS1=%08x\n", avs1);
                        pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
                        pr_emerg("AVA=%016llx\n",
                                 make64(in_be32(p + PAMU_AVAH),
                                        in_be32(p + PAMU_AVAL)));
                        pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
                        pr_emerg("POEA=%016llx\n",
                                 make64(in_be32(p + PAMU_POEAH),
                                        in_be32(p + PAMU_POEAL)));

                        phys = make64(in_be32(p + PAMU_POEAH),
                                      in_be32(p + PAMU_POEAL));

                        /* Assume that POEA points to a PAACE */
                        if (phys) {
                                u32 *paace = phys_to_virt(phys);

                                /* Only the first four words are relevant */
                                for (j = 0; j < 4; j++)
                                        pr_emerg("PAACE[%u]=%08x\n",
                                                 j, in_be32(paace + j));
                        }

                        /* clear access violation condition */
                        out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
                        paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
                        BUG_ON(!paace);
                        /* check if we got a violation for a disabled LIODN */
                        if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
                                /*
                                 * As per hardware erratum A-003638, access
                                 * violation can be reported for a disabled
                                 * LIODN. If we hit that condition, disable
                                 * access violation reporting.
                                 */
                                pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
                        } else {
                                /* Disable the LIODN */
                                ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
                                BUG_ON(ret);
                                pr_emerg("Disabling liodn %x\n",
                                         avs1 >> PAMU_AVS1_LIODN_SHIFT);
                        }
                        out_be32((p + PAMU_PICS), pics);
                }
        }

        return IRQ_HANDLED;
}

#define LAWAR_EN                0x80000000
#define LAWAR_TARGET_MASK       0x0FF00000
#define LAWAR_TARGET_SHIFT      20
#define LAWAR_SIZE_MASK         0x0000003F
#define LAWAR_CSDID_MASK        0x000FF000
#define LAWAR_CSDID_SHIFT       12

#define LAW_SIZE_4K             0xb

struct ccsr_law {
        u32 lawbarh;    /* LAWn base address high */
        u32 lawbarl;    /* LAWn base address low */
        u32 lawar;      /* LAWn attributes */
        u32 reserved;
};
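/*
 * Worked example, illustrative and not part of the upstream driver: a
 * LAW covers 2^(SIZE+1) bytes, which is why create_csd() below computes
 * the end address as law_start + (2ULL << SIZE). LAW_SIZE_4K (0xb)
 * therefore decodes to 2 << 0xb = 2^12 = 4 KiB; larger windows follow
 * the same pattern.
 */
static u64 __maybe_unused law_size_example(void)
{
        u32 size_field = LAW_SIZE_4K;   /* SIZE field of a hypothetical LAWAR */

        return 2ULL << size_field;      /* 4096 bytes */
}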
/*
 * Create a coherence subdomain for a given memory block.
 */
static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
{
        struct device_node *np;
        const __be32 *iprop;
        void __iomem *lac = NULL;       /* Local Access Control registers */
        struct ccsr_law __iomem *law;
        void __iomem *ccm = NULL;
        u32 __iomem *csdids;
        unsigned int i, num_laws, num_csds;
        u32 law_target = 0;
        u32 csd_id = 0;
        int ret = 0;

        np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
        if (!np)
                return -ENODEV;

        iprop = of_get_property(np, "fsl,num-laws", NULL);
        if (!iprop) {
                ret = -ENODEV;
                goto error;
        }

        num_laws = be32_to_cpup(iprop);
        if (!num_laws) {
                ret = -ENODEV;
                goto error;
        }

        lac = of_iomap(np, 0);
        if (!lac) {
                ret = -ENODEV;
                goto error;
        }

        /* LAW registers are at offset 0xC00 */
        law = lac + 0xC00;

        of_node_put(np);

        np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
        if (!np) {
                ret = -ENODEV;
                goto error;
        }

        iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
        if (!iprop) {
                ret = -ENODEV;
                goto error;
        }

        num_csds = be32_to_cpup(iprop);
        if (!num_csds) {
                ret = -ENODEV;
                goto error;
        }

        ccm = of_iomap(np, 0);
        if (!ccm) {
                ret = -ENOMEM;
                goto error;
        }

        /* The undocumented CSDID registers are at offset 0x600 */
        csdids = ccm + 0x600;

        of_node_put(np);
        np = NULL;

        /* Find an unused coherence subdomain ID */
        for (csd_id = 0; csd_id < num_csds; csd_id++) {
                if (!csdids[csd_id])
                        break;
        }

        /* Store the Port ID in the proper (undocumented) CIDMRxx register */
        csdids[csd_id] = csd_port_id;

        /* Find the DDR LAW that maps to our buffer. */
        for (i = 0; i < num_laws; i++) {
                if (law[i].lawar & LAWAR_EN) {
                        phys_addr_t law_start, law_end;

                        law_start = make64(law[i].lawbarh, law[i].lawbarl);
                        law_end = law_start +
                                (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));

                        if (law_start <= phys && phys < law_end) {
                                law_target = law[i].lawar & LAWAR_TARGET_MASK;
                                break;
                        }
                }
        }

        if (i == 0 || i == num_laws) {
                /* This should never happen */
                ret = -ENOENT;
                goto error;
        }

        /* Find a free LAW entry */
        while (law[--i].lawar & LAWAR_EN) {
                if (i == 0) {
                        /* No higher priority LAW slots available */
                        ret = -ENOENT;
                        goto error;
                }
        }

        law[i].lawbarh = upper_32_bits(phys);
        law[i].lawbarl = lower_32_bits(phys);
        wmb();
        law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
                (LAW_SIZE_4K + get_order(size));
        wmb();

error:
        if (ccm)
                iounmap(ccm);

        if (lac)
                iounmap(lac);

        if (np)
                of_node_put(np);

        return ret;
}

/*
 * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
 * bit map of snoopers for a given range of memory mapped by a LAW.
 *
 * All future CoreNet-enabled SOCs will have this erratum (A-004510) fixed, so
 * this table should never need to be updated. SVRs are guaranteed to be
 * unique, so there is no worry that a future SOC will inadvertently have one
 * of these values.
 */
static const struct {
        u32 svr;
        u32 port_id;
} port_id_map[] = {
        {(SVR_P2040 << 8) | 0x10, 0xFF000000},  /* P2040 1.0 */
        {(SVR_P2040 << 8) | 0x11, 0xFF000000},  /* P2040 1.1 */
        {(SVR_P2041 << 8) | 0x10, 0xFF000000},  /* P2041 1.0 */
        {(SVR_P2041 << 8) | 0x11, 0xFF000000},  /* P2041 1.1 */
        {(SVR_P3041 << 8) | 0x10, 0xFF000000},  /* P3041 1.0 */
        {(SVR_P3041 << 8) | 0x11, 0xFF000000},  /* P3041 1.1 */
        {(SVR_P4040 << 8) | 0x20, 0xFFF80000},  /* P4040 2.0 */
        {(SVR_P4080 << 8) | 0x20, 0xFFF80000},  /* P4080 2.0 */
        {(SVR_P5010 << 8) | 0x10, 0xFC000000},  /* P5010 1.0 */
        {(SVR_P5010 << 8) | 0x20, 0xFC000000},  /* P5010 2.0 */
        {(SVR_P5020 << 8) | 0x10, 0xFC000000},  /* P5020 1.0 */
        {(SVR_P5021 << 8) | 0x10, 0xFF800000},  /* P5021 1.0 */
        {(SVR_P5040 << 8) | 0x10, 0xFF800000},  /* P5040 1.0 */
};

#define SVR_SECURITY    0x80000 /* The Security (E) bit */
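/*
 * Illustrative sketch, not part of the upstream driver: fsl_pamu_probe()
 * below masks off the Security (E) bit before comparing against the
 * table, so e.g. a hypothetical P4080E rev 2.0 part matches the P4080
 * 2.0 entry. The raw SVR value is constructed for demonstration only.
 */
static u32 __maybe_unused port_id_lookup_example(void)
{
        u32 svr = ((SVR_P4080 << 8) | 0x20) | SVR_SECURITY;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(port_id_map); i++)
                if (port_id_map[i].svr == (svr & ~SVR_SECURITY))
                        return port_id_map[i].port_id;  /* 0xFFF80000 */

        return 0;
}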
static int fsl_pamu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        void __iomem *pamu_regs = NULL;
        struct ccsr_guts __iomem *guts_regs = NULL;
        u32 pamubypenr, pamu_counter;
        unsigned long pamu_reg_off;
        unsigned long pamu_reg_base;
        struct pamu_isr_data *data = NULL;
        struct device_node *guts_node;
        u64 size;
        struct page *p;
        int ret = 0;
        int irq;
        phys_addr_t ppaact_phys;
        phys_addr_t spaact_phys;
        struct ome *omt;
        phys_addr_t omt_phys;
        size_t mem_size = 0;
        unsigned int order = 0;
        u32 csd_port_id = 0;
        unsigned i;
        /*
         * enumerate all PAMUs and allocate and setup PAMU tables
         * for each of them.
         * NOTE: All PAMUs share the same LIODN tables.
         */

        if (WARN_ON(probed))
                return -EBUSY;

        pamu_regs = of_iomap(dev->of_node, 0);
        if (!pamu_regs) {
                dev_err(dev, "ioremap of PAMU node failed\n");
                return -ENOMEM;
        }
        of_get_address(dev->of_node, 0, &size, NULL);

        irq = irq_of_parse_and_map(dev->of_node, 0);
        if (irq == NO_IRQ) {
                dev_warn(dev, "no interrupts listed in PAMU node\n");
                goto error;
        }

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto error;
        }
        data->pamu_reg_base = pamu_regs;
        data->count = size / PAMU_OFFSET;

        /* The ISR needs access to the regs, so we won't iounmap them */
        ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
        if (ret < 0) {
                dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
                goto error;
        }

        guts_node = of_find_matching_node(NULL, guts_device_ids);
        if (!guts_node) {
                dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);
                ret = -ENODEV;
                goto error;
        }

        guts_regs = of_iomap(guts_node, 0);
        of_node_put(guts_node);
        if (!guts_regs) {
                dev_err(dev, "ioremap of GUTS node failed\n");
                ret = -ENODEV;
                goto error;
        }

        /* read in the PAMU capability registers */
        get_pamu_cap_values((unsigned long)pamu_regs);
        /*
         * To simplify the allocation of a coherency domain, we allocate the
         * PAACT and the OMT in the same memory buffer. Unfortunately, this
         * wastes more memory compared to allocating the buffers separately.
         */
        /* Determine how much memory we need */
        mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
                (PAGE_SIZE << get_order(SPAACT_SIZE)) +
                (PAGE_SIZE << get_order(OMT_SIZE));
        order = get_order(mem_size);

        p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!p) {
                dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
                ret = -ENOMEM;
                goto error;
        }

        ppaact = page_address(p);
        ppaact_phys = page_to_phys(p);

        /* Make sure the memory is naturally aligned */
        if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
                dev_err(dev, "PAACT/OMT block is unaligned\n");
                ret = -ENOMEM;
                goto error;
        }

        spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
        omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));

        dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);

        /* Check to see if we need to implement the work-around on this SOC */

        /* Determine the Port ID for our coherence subdomain */
        for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
                if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
                        csd_port_id = port_id_map[i].port_id;
                        dev_dbg(dev, "found matching SVR %08x\n",
                                port_id_map[i].svr);
                        break;
                }
        }

        if (csd_port_id) {
                dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
                        &ppaact_phys, mem_size, csd_port_id);

                ret = create_csd(ppaact_phys, mem_size, csd_port_id);
                if (ret) {
                        dev_err(dev, "could not create coherence subdomain\n");
                        return ret;
                }
        }

        spaact_phys = virt_to_phys(spaact);
        omt_phys = virt_to_phys(omt);

        pamubypenr = in_be32(&guts_regs->pamubypenr);

        for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
             pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {

                pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
                setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
                               spaact_phys, omt_phys);
                /* Disable PAMU bypass for this PAMU */
                pamubypenr &= ~pamu_counter;
        }
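        /*
         * Worked example, illustrative and not part of the upstream
         * driver: PAMU n in the register block corresponds to bypass
         * bit (0x80000000 >> n) of pamubypenr, which the loop above
         * clears one PAMU at a time. For a hypothetical SOC with 4
         * PAMUs, a sketch of the same computation:
         *
         *      u32 bypass = in_be32(&guts_regs->pamubypenr);
         *      unsigned int n;
         *
         *      for (n = 0; n < 4; n++)         // assumed PAMU count
         *              bypass &= ~(0x80000000 >> n);
         *      // the top four bypass bits are now clear
         */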
        setup_omt(omt);

        /* Enable all relevant PAMU(s) */
        out_be32(&guts_regs->pamubypenr, pamubypenr);

        iounmap(guts_regs);

        /* Enable DMA for the LIODNs in the device tree */

        setup_liodns();

        probed = true;

        return 0;

error:
        if (irq != NO_IRQ)
                free_irq(irq, data);

        kfree_sensitive(data);

        if (pamu_regs)
                iounmap(pamu_regs);

        if (guts_regs)
                iounmap(guts_regs);

        if (ppaact)
                free_pages((unsigned long)ppaact, order);

        ppaact = NULL;

        return ret;
}

static struct platform_driver fsl_of_pamu_driver = {
        .driver = {
                .name = "fsl-of-pamu",
        },
        .probe = fsl_pamu_probe,
};

static __init int fsl_pamu_init(void)
{
        struct platform_device *pdev = NULL;
        struct device_node *np;
        int ret;

        /*
         * The normal OF process calls the probe function at some
         * indeterminate later time, after most drivers have loaded. This is
         * too late for us, because PAMU clients (like the Qman driver)
         * depend on PAMU being initialized early.
         *
         * So instead, we "manually" call our probe function by creating the
         * platform devices ourselves.
         */

        /*
         * We assume that there is only one PAMU node in the device tree. A
         * single PAMU node represents all of the PAMU devices in the SOC
         * already. Everything else already makes that assumption, and the
         * binding for the PAMU nodes doesn't allow for any parent-child
         * relationships anyway. In other words, support for more than one
         * PAMU node would require significant changes to a lot of code.
         */

        np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
        if (!np) {
                pr_err("could not find a PAMU node\n");
                return -ENODEV;
        }

        ret = platform_driver_register(&fsl_of_pamu_driver);
        if (ret) {
                pr_err("could not register driver (err=%i)\n", ret);
                goto error_driver_register;
        }

        pdev = platform_device_alloc("fsl-of-pamu", 0);
        if (!pdev) {
                pr_err("could not allocate device %pOF\n", np);
                ret = -ENOMEM;
                goto error_device_alloc;
        }
        pdev->dev.of_node = of_node_get(np);

        ret = pamu_domain_init();
        if (ret)
                goto error_device_add;

        ret = platform_device_add(pdev);
        if (ret) {
                pr_err("could not add device %pOF (err=%i)\n", np, ret);
                goto error_device_add;
        }

        return 0;

error_device_add:
        of_node_put(pdev->dev.of_node);
        pdev->dev.of_node = NULL;

        platform_device_put(pdev);

error_device_alloc:
        platform_driver_unregister(&fsl_of_pamu_driver);

error_driver_register:
        of_node_put(np);

        return ret;
}
arch_initcall(fsl_pamu_init);