arm-cci.c (15878B)
1/* 2 * CCI cache coherent interconnect driver 3 * 4 * Copyright (C) 2013 ARM Ltd. 5 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 12 * kind, whether express or implied; without even the implied warranty 13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 */ 16 17#include <linux/arm-cci.h> 18#include <linux/io.h> 19#include <linux/module.h> 20#include <linux/of_address.h> 21#include <linux/of_platform.h> 22#include <linux/platform_device.h> 23#include <linux/slab.h> 24 25#include <asm/cacheflush.h> 26#include <asm/smp_plat.h> 27 28static void __iomem *cci_ctrl_base __ro_after_init; 29static unsigned long cci_ctrl_phys __ro_after_init; 30 31#ifdef CONFIG_ARM_CCI400_PORT_CTRL 32struct cci_nb_ports { 33 unsigned int nb_ace; 34 unsigned int nb_ace_lite; 35}; 36 37static const struct cci_nb_ports cci400_ports = { 38 .nb_ace = 2, 39 .nb_ace_lite = 3 40}; 41 42#define CCI400_PORTS_DATA (&cci400_ports) 43#else 44#define CCI400_PORTS_DATA (NULL) 45#endif 46 47static const struct of_device_id arm_cci_matches[] = { 48#ifdef CONFIG_ARM_CCI400_COMMON 49 {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA }, 50#endif 51#ifdef CONFIG_ARM_CCI5xx_PMU 52 { .compatible = "arm,cci-500", }, 53 { .compatible = "arm,cci-550", }, 54#endif 55 {}, 56}; 57 58static const struct of_dev_auxdata arm_cci_auxdata[] = { 59 OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base), 60 OF_DEV_AUXDATA("arm,cci-400-pmu,r0", 0, NULL, &cci_ctrl_base), 61 OF_DEV_AUXDATA("arm,cci-400-pmu,r1", 0, NULL, &cci_ctrl_base), 62 OF_DEV_AUXDATA("arm,cci-500-pmu,r0", 0, NULL, &cci_ctrl_base), 63 OF_DEV_AUXDATA("arm,cci-550-pmu,r0", 0, NULL, &cci_ctrl_base), 64 {} 65}; 66 67#define DRIVER_NAME "ARM-CCI" 68 69static int cci_platform_probe(struct platform_device *pdev) 70{ 71 if (!cci_probed()) 72 return -ENODEV; 73 74 return of_platform_populate(pdev->dev.of_node, NULL, 75 arm_cci_auxdata, &pdev->dev); 76} 77 78static struct platform_driver cci_platform_driver = { 79 .driver = { 80 .name = DRIVER_NAME, 81 .of_match_table = arm_cci_matches, 82 }, 83 .probe = cci_platform_probe, 84}; 85 86static int __init cci_platform_init(void) 87{ 88 return platform_driver_register(&cci_platform_driver); 89} 90 91#ifdef CONFIG_ARM_CCI400_PORT_CTRL 92 93#define CCI_PORT_CTRL 0x0 94#define CCI_CTRL_STATUS 0xc 95 96#define CCI_ENABLE_SNOOP_REQ 0x1 97#define CCI_ENABLE_DVM_REQ 0x2 98#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ) 99 100enum cci_ace_port_type { 101 ACE_INVALID_PORT = 0x0, 102 ACE_PORT, 103 ACE_LITE_PORT, 104}; 105 106struct cci_ace_port { 107 void __iomem *base; 108 unsigned long phys; 109 enum cci_ace_port_type type; 110 struct device_node *dn; 111}; 112 113static struct cci_ace_port *ports; 114static unsigned int nb_cci_ports; 115 116struct cpu_port { 117 u64 mpidr; 118 u32 port; 119}; 120 121/* 122 * Use the port MSB as valid flag, shift can be made dynamic 123 * by computing number of bits required for port indexes. 124 * Code disabling CCI cpu ports runs with D-cache invalidated 125 * and SCTLR bit clear so data accesses must be kept to a minimum 126 * to improve performance; for now shift is left static to 127 * avoid one more data access while disabling the CCI port. 128 */ 129#define PORT_VALID_SHIFT 31 130#define PORT_VALID (0x1 << PORT_VALID_SHIFT) 131 132static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr) 133{ 134 port->port = PORT_VALID | index; 135 port->mpidr = mpidr; 136} 137 138static inline bool cpu_port_is_valid(struct cpu_port *port) 139{ 140 return !!(port->port & PORT_VALID); 141} 142 143static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr) 144{ 145 return port->mpidr == (mpidr & MPIDR_HWID_BITMASK); 146} 147 148static struct cpu_port cpu_port[NR_CPUS]; 149 150/** 151 * __cci_ace_get_port - Function to retrieve the port index connected to 152 * a cpu or device. 153 * 154 * @dn: device node of the device to look-up 155 * @type: port type 156 * 157 * Return value: 158 * - CCI port index if success 159 * - -ENODEV if failure 160 */ 161static int __cci_ace_get_port(struct device_node *dn, int type) 162{ 163 int i; 164 bool ace_match; 165 struct device_node *cci_portn; 166 167 cci_portn = of_parse_phandle(dn, "cci-control-port", 0); 168 for (i = 0; i < nb_cci_ports; i++) { 169 ace_match = ports[i].type == type; 170 if (ace_match && cci_portn == ports[i].dn) 171 return i; 172 } 173 return -ENODEV; 174} 175 176int cci_ace_get_port(struct device_node *dn) 177{ 178 return __cci_ace_get_port(dn, ACE_LITE_PORT); 179} 180EXPORT_SYMBOL_GPL(cci_ace_get_port); 181 182static void cci_ace_init_ports(void) 183{ 184 int port, cpu; 185 struct device_node *cpun; 186 187 /* 188 * Port index look-up speeds up the function disabling ports by CPU, 189 * since the logical to port index mapping is done once and does 190 * not change after system boot. 191 * The stashed index array is initialized for all possible CPUs 192 * at probe time. 193 */ 194 for_each_possible_cpu(cpu) { 195 /* too early to use cpu->of_node */ 196 cpun = of_get_cpu_node(cpu, NULL); 197 198 if (WARN(!cpun, "Missing cpu device node\n")) 199 continue; 200 201 port = __cci_ace_get_port(cpun, ACE_PORT); 202 if (port < 0) 203 continue; 204 205 init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu)); 206 } 207 208 for_each_possible_cpu(cpu) { 209 WARN(!cpu_port_is_valid(&cpu_port[cpu]), 210 "CPU %u does not have an associated CCI port\n", 211 cpu); 212 } 213} 214/* 215 * Functions to enable/disable a CCI interconnect slave port 216 * 217 * They are called by low-level power management code to disable slave 218 * interfaces snoops and DVM broadcast. 219 * Since they may execute with cache data allocation disabled and 220 * after the caches have been cleaned and invalidated the functions provide 221 * no explicit locking since they may run with D-cache disabled, so normal 222 * cacheable kernel locks based on ldrex/strex may not work. 223 * Locking has to be provided by BSP implementations to ensure proper 224 * operations. 225 */ 226 227/** 228 * cci_port_control() - function to control a CCI port 229 * 230 * @port: index of the port to setup 231 * @enable: if true enables the port, if false disables it 232 */ 233static void notrace cci_port_control(unsigned int port, bool enable) 234{ 235 void __iomem *base = ports[port].base; 236 237 writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL); 238 /* 239 * This function is called from power down procedures 240 * and must not execute any instruction that might 241 * cause the processor to be put in a quiescent state 242 * (eg wfi). Hence, cpu_relax() can not be added to this 243 * read loop to optimize power, since it might hide possibly 244 * disruptive operations. 245 */ 246 while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1) 247 ; 248} 249 250/** 251 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU 252 * reference 253 * 254 * @mpidr: mpidr of the CPU whose CCI port should be disabled 255 * 256 * Disabling a CCI port for a CPU implies disabling the CCI port 257 * controlling that CPU cluster. Code disabling CPU CCI ports 258 * must make sure that the CPU running the code is the last active CPU 259 * in the cluster ie all other CPUs are quiescent in a low power state. 260 * 261 * Return: 262 * 0 on success 263 * -ENODEV on port look-up failure 264 */ 265int notrace cci_disable_port_by_cpu(u64 mpidr) 266{ 267 int cpu; 268 bool is_valid; 269 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 270 is_valid = cpu_port_is_valid(&cpu_port[cpu]); 271 if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) { 272 cci_port_control(cpu_port[cpu].port, false); 273 return 0; 274 } 275 } 276 return -ENODEV; 277} 278EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu); 279 280/** 281 * cci_enable_port_for_self() - enable a CCI port for calling CPU 282 * 283 * Enabling a CCI port for the calling CPU implies enabling the CCI 284 * port controlling that CPU's cluster. Caller must make sure that the 285 * CPU running the code is the first active CPU in the cluster and all 286 * other CPUs are quiescent in a low power state or waiting for this CPU 287 * to complete the CCI initialization. 288 * 289 * Because this is called when the MMU is still off and with no stack, 290 * the code must be position independent and ideally rely on callee 291 * clobbered registers only. To achieve this we must code this function 292 * entirely in assembler. 293 * 294 * On success this returns with the proper CCI port enabled. In case of 295 * any failure this never returns as the inability to enable the CCI is 296 * fatal and there is no possible recovery at this stage. 297 */ 298asmlinkage void __naked cci_enable_port_for_self(void) 299{ 300 asm volatile ("\n" 301" .arch armv7-a\n" 302" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n" 303" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n" 304" adr r1, 5f \n" 305" ldr r2, [r1] \n" 306" add r1, r1, r2 @ &cpu_port \n" 307" add ip, r1, %[sizeof_cpu_port] \n" 308 309 /* Loop over the cpu_port array looking for a matching MPIDR */ 310"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n" 311" cmp r2, r0 @ compare MPIDR \n" 312" bne 2f \n" 313 314 /* Found a match, now test port validity */ 315" ldr r3, [r1, %[offsetof_cpu_port_port]] \n" 316" tst r3, #"__stringify(PORT_VALID)" \n" 317" bne 3f \n" 318 319 /* no match, loop with the next cpu_port entry */ 320"2: add r1, r1, %[sizeof_struct_cpu_port] \n" 321" cmp r1, ip @ done? \n" 322" blo 1b \n" 323 324 /* CCI port not found -- cheaply try to stall this CPU */ 325"cci_port_not_found: \n" 326" wfi \n" 327" wfe \n" 328" b cci_port_not_found \n" 329 330 /* Use matched port index to look up the corresponding ports entry */ 331"3: bic r3, r3, #"__stringify(PORT_VALID)" \n" 332" adr r0, 6f \n" 333" ldmia r0, {r1, r2} \n" 334" sub r1, r1, r0 @ virt - phys \n" 335" ldr r0, [r0, r2] @ *(&ports) \n" 336" mov r2, %[sizeof_struct_ace_port] \n" 337" mla r0, r2, r3, r0 @ &ports[index] \n" 338" sub r0, r0, r1 @ virt_to_phys() \n" 339 340 /* Enable the CCI port */ 341" ldr r0, [r0, %[offsetof_port_phys]] \n" 342" mov r3, %[cci_enable_req]\n" 343" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n" 344 345 /* poll the status reg for completion */ 346" adr r1, 7f \n" 347" ldr r0, [r1] \n" 348" ldr r0, [r0, r1] @ cci_ctrl_base \n" 349"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n" 350" tst r1, %[cci_control_status_bits] \n" 351" bne 4b \n" 352 353" mov r0, #0 \n" 354" bx lr \n" 355 356" .align 2 \n" 357"5: .word cpu_port - . \n" 358"6: .word . \n" 359" .word ports - 6b \n" 360"7: .word cci_ctrl_phys - . \n" 361 : : 362 [sizeof_cpu_port] "i" (sizeof(cpu_port)), 363 [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ), 364 [cci_control_status_bits] "i" cpu_to_le32(1), 365#ifndef __ARMEB__ 366 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)), 367#else 368 [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4), 369#endif 370 [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)), 371 [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)), 372 [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)), 373 [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) ); 374} 375 376/** 377 * __cci_control_port_by_device() - function to control a CCI port by device 378 * reference 379 * 380 * @dn: device node pointer of the device whose CCI port should be 381 * controlled 382 * @enable: if true enables the port, if false disables it 383 * 384 * Return: 385 * 0 on success 386 * -ENODEV on port look-up failure 387 */ 388int notrace __cci_control_port_by_device(struct device_node *dn, bool enable) 389{ 390 int port; 391 392 if (!dn) 393 return -ENODEV; 394 395 port = __cci_ace_get_port(dn, ACE_LITE_PORT); 396 if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n", 397 dn)) 398 return -ENODEV; 399 cci_port_control(port, enable); 400 return 0; 401} 402EXPORT_SYMBOL_GPL(__cci_control_port_by_device); 403 404/** 405 * __cci_control_port_by_index() - function to control a CCI port by port index 406 * 407 * @port: port index previously retrieved with cci_ace_get_port() 408 * @enable: if true enables the port, if false disables it 409 * 410 * Return: 411 * 0 on success 412 * -ENODEV on port index out of range 413 * -EPERM if operation carried out on an ACE PORT 414 */ 415int notrace __cci_control_port_by_index(u32 port, bool enable) 416{ 417 if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT) 418 return -ENODEV; 419 /* 420 * CCI control for ports connected to CPUS is extremely fragile 421 * and must be made to go through a specific and controlled 422 * interface (ie cci_disable_port_by_cpu(); control by general purpose 423 * indexing is therefore disabled for ACE ports. 424 */ 425 if (ports[port].type == ACE_PORT) 426 return -EPERM; 427 428 cci_port_control(port, enable); 429 return 0; 430} 431EXPORT_SYMBOL_GPL(__cci_control_port_by_index); 432 433static const struct of_device_id arm_cci_ctrl_if_matches[] = { 434 {.compatible = "arm,cci-400-ctrl-if", }, 435 {}, 436}; 437 438static int cci_probe_ports(struct device_node *np) 439{ 440 struct cci_nb_ports const *cci_config; 441 int ret, i, nb_ace = 0, nb_ace_lite = 0; 442 struct device_node *cp; 443 struct resource res; 444 const char *match_str; 445 bool is_ace; 446 447 448 cci_config = of_match_node(arm_cci_matches, np)->data; 449 if (!cci_config) 450 return -ENODEV; 451 452 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite; 453 454 ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL); 455 if (!ports) 456 return -ENOMEM; 457 458 for_each_available_child_of_node(np, cp) { 459 if (!of_match_node(arm_cci_ctrl_if_matches, cp)) 460 continue; 461 462 i = nb_ace + nb_ace_lite; 463 464 if (i >= nb_cci_ports) 465 break; 466 467 if (of_property_read_string(cp, "interface-type", 468 &match_str)) { 469 WARN(1, "node %pOF missing interface-type property\n", 470 cp); 471 continue; 472 } 473 is_ace = strcmp(match_str, "ace") == 0; 474 if (!is_ace && strcmp(match_str, "ace-lite")) { 475 WARN(1, "node %pOF containing invalid interface-type property, skipping it\n", 476 cp); 477 continue; 478 } 479 480 ret = of_address_to_resource(cp, 0, &res); 481 if (!ret) { 482 ports[i].base = ioremap(res.start, resource_size(&res)); 483 ports[i].phys = res.start; 484 } 485 if (ret || !ports[i].base) { 486 WARN(1, "unable to ioremap CCI port %d\n", i); 487 continue; 488 } 489 490 if (is_ace) { 491 if (WARN_ON(nb_ace >= cci_config->nb_ace)) 492 continue; 493 ports[i].type = ACE_PORT; 494 ++nb_ace; 495 } else { 496 if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite)) 497 continue; 498 ports[i].type = ACE_LITE_PORT; 499 ++nb_ace_lite; 500 } 501 ports[i].dn = cp; 502 } 503 504 /* 505 * If there is no CCI port that is under kernel control 506 * return early and report probe status. 507 */ 508 if (!nb_ace && !nb_ace_lite) 509 return -ENODEV; 510 511 /* initialize a stashed array of ACE ports to speed-up look-up */ 512 cci_ace_init_ports(); 513 514 /* 515 * Multi-cluster systems may need this data when non-coherent, during 516 * cluster power-up/power-down. Make sure it reaches main memory. 517 */ 518 sync_cache_w(&cci_ctrl_base); 519 sync_cache_w(&cci_ctrl_phys); 520 sync_cache_w(&ports); 521 sync_cache_w(&cpu_port); 522 __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports); 523 pr_info("ARM CCI driver probed\n"); 524 525 return 0; 526} 527#else /* !CONFIG_ARM_CCI400_PORT_CTRL */ 528static inline int cci_probe_ports(struct device_node *np) 529{ 530 return 0; 531} 532#endif /* CONFIG_ARM_CCI400_PORT_CTRL */ 533 534static int cci_probe(void) 535{ 536 int ret; 537 struct device_node *np; 538 struct resource res; 539 540 np = of_find_matching_node(NULL, arm_cci_matches); 541 if (!of_device_is_available(np)) 542 return -ENODEV; 543 544 ret = of_address_to_resource(np, 0, &res); 545 if (!ret) { 546 cci_ctrl_base = ioremap(res.start, resource_size(&res)); 547 cci_ctrl_phys = res.start; 548 } 549 if (ret || !cci_ctrl_base) { 550 WARN(1, "unable to ioremap CCI ctrl\n"); 551 return -ENXIO; 552 } 553 554 return cci_probe_ports(np); 555} 556 557static int cci_init_status = -EAGAIN; 558static DEFINE_MUTEX(cci_probing); 559 560static int cci_init(void) 561{ 562 if (cci_init_status != -EAGAIN) 563 return cci_init_status; 564 565 mutex_lock(&cci_probing); 566 if (cci_init_status == -EAGAIN) 567 cci_init_status = cci_probe(); 568 mutex_unlock(&cci_probing); 569 return cci_init_status; 570} 571 572/* 573 * To sort out early init calls ordering a helper function is provided to 574 * check if the CCI driver has beed initialized. Function check if the driver 575 * has been initialized, if not it calls the init function that probes 576 * the driver and updates the return value. 577 */ 578bool cci_probed(void) 579{ 580 return cci_init() == 0; 581} 582EXPORT_SYMBOL_GPL(cci_probed); 583 584early_initcall(cci_init); 585core_initcall(cci_platform_init); 586MODULE_LICENSE("GPL"); 587MODULE_DESCRIPTION("ARM CCI support");