setup_64.c (17052B)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sparc64/kernel/setup.c
 *
 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
        0, 0,                   /* orig-x, orig-y */
        0,                      /* unused */
        0,                      /* orig-video-page */
        0,                      /* orig-video-mode */
        128,                    /* orig-video-cols */
        0, 0, 0,                /* unused, ega_bx, unused */
        54,                     /* orig-video-lines */
        0,                      /* orig-video-isVGA */
        16                      /* orig-video-points */
};

static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
        prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
        .name =         "earlyprom",
        .write =        prom_console_write,
        .flags =        CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
        .index =        -1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
        switch (c) {
        case 'd':
        case 's':
                break;
        case 'h':
                prom_printf("boot_flags_init: Halt!\n");
                prom_halt();
                break;
        case 'p':
                prom_early_console.flags &= ~CON_BOOT;
                break;
        case 'P':
                /* Force UltraSPARC-III P-Cache on. */
                if (tlb_type != cheetah) {
                        printk("BOOT: Ignoring P-Cache force option.\n");
                        break;
                }
                cheetah_pcache_forced_on = 1;
                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
                cheetah_enable_pcache();
                break;

        default:
                printk("Unknown boot switch (-%c)\n", c);
                break;
        }
}

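/* Scan the PROM boot command line: single-character switches following a
 * '-' are handed to process_switch(), and "mem=" sets cmdline_memory_size.
 */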
static void __init boot_flags_init(char *commands)
{
        while (*commands) {
                /* Move to the start of the next "argument". */
                while (*commands == ' ')
                        commands++;

                /* Process any command switches, otherwise skip it. */
                if (*commands == '\0')
                        break;
                if (*commands == '-') {
                        commands++;
                        while (*commands && *commands != ' ')
                                process_switch(*commands++);
                        continue;
                }
                if (!strncmp(commands, "mem=", 4))
                        cmdline_memory_size = memparse(commands + 4, &commands);

                while (*commands && *commands != ' ')
                        commands++;
        }
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static void __init per_cpu_patch(void)
{
        struct cpuid_patch_entry *p;
        unsigned long ver;
        int is_jbus;

        if (tlb_type == spitfire && !this_is_starfire)
                return;

        is_jbus = 0;
        if (tlb_type != hypervisor) {
                __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
                           (ver >> 32UL) == __SERRANO_ID);
        }

        p = &__cpuid_patch;
        while (p < &__cpuid_patch_end) {
                unsigned long addr = p->addr;
                unsigned int *insns;

                switch (tlb_type) {
                case spitfire:
                        insns = &p->starfire[0];
                        break;
                case cheetah:
                case cheetah_plus:
                        if (is_jbus)
                                insns = &p->cheetah_jbus[0];
                        else
                                insns = &p->cheetah_safari[0];
                        break;
                case hypervisor:
                        insns = &p->sun4v[0];
                        break;
                default:
                        prom_printf("Unknown cpu type, halting.\n");
                        prom_halt();
                }

                *(unsigned int *) (addr + 0) = insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                *(unsigned int *) (addr + 8) = insns[2];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 8));

                *(unsigned int *) (addr + 12) = insns[3];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 12));

                p++;
        }
}

void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
                             struct sun4v_1insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insn;
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                start++;
        }
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                             struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                              struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

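/* Apply the sun4v instruction patches when running under the hypervisor,
 * including the chip-specific (M7/M8/SN) and fast window control variants,
 * then let sun4v_hvapi_init() register the hypervisor API groups.
 */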
static void __init sun4v_patch(void)
{
        extern void sun4v_hvapi_init(void);

        if (tlb_type != hypervisor)
                return;

        sun4v_patch_1insn_range(&__sun4v_1insn_patch,
                                &__sun4v_1insn_patch_end);

        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);

        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
        case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
                                        &__sun_m7_1insn_patch_end);
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
                break;
        default:
                break;
        }

        if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
                sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
                                        &__fast_win_ctrl_1insn_patch_end);
        }

        sun4v_hvapi_init();
}

static void __init popc_patch(void)
{
        struct popc_3insn_patch_entry *p3;
        struct popc_6insn_patch_entry *p6;

        p3 = &__popc_3insn_patch;
        while (p3 < &__popc_3insn_patch_end) {
                unsigned long i, addr = p3->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p3->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p3++;
        }

        p6 = &__popc_6insn_patch;
        while (p6 < &__popc_6insn_patch_end) {
                unsigned long i, addr = p6->addr;

                for (i = 0; i < 6; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p6->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p6++;
        }
}

static void __init pause_patch(void)
{
        struct pause_patch_entry *p;

        p = &__pause_3insn_patch;
        while (p < &__pause_3insn_patch_end) {
                unsigned long i, addr = p->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p++;
        }
}

void __init start_early_boot(void)
{
        int cpu;

        check_if_starfire();
        per_cpu_patch();
        sun4v_patch();
        smp_init_cpu_poke();

        cpu = hard_smp_processor_id();
        if (cpu >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
                            cpu, NR_CPUS);
                prom_halt();
        }
        current_thread_info()->cpu = cpu;

        time_init_early();
        prom_init_report();
        start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
                                   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
                                   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
        "flush", "stbar", "swap", "muldiv", "v9",
        "ultra3", "blkinit", "n2",

        /* These strings are as they appear in the machine description
         * 'hwcap-list' property for cpu nodes.
         */
        "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
        "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
        "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
        "adp",
};

static const char *crypto_hwcaps[] = {
        "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
        "sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

void cpucap_info(struct seq_file *m)
{
        unsigned long caps = sparc64_elf_hwcap;
        int i, printed = 0;

        seq_puts(m, "cpucaps\t\t: ");
        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit)) {
                        seq_printf(m, "%s%s",
                                   printed ? "," : "", hwcaps[i]);
                        printed++;
                }
        }
        if (caps & HWCAP_SPARC_CRYPTO) {
                unsigned long cfr;

                __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        unsigned long bit = 1UL << i;
                        if (cfr & bit) {
                                seq_printf(m, "%s%s",
                                           printed ? "," : "", crypto_hwcaps[i]);
                                printed++;
                        }
                }
        }
        seq_putc(m, '\n');
}

static void __init report_one_hwcap(int *printed, const char *name)
{
        if ((*printed) == 0)
                printk(KERN_INFO "CPU CAPS: [");
        printk(KERN_CONT "%s%s",
               (*printed) ? "," : "", name);
        if (++(*printed) == 8) {
                printk(KERN_CONT "]\n");
                *printed = 0;
        }
}

static void __init report_crypto_hwcaps(int *printed)
{
        unsigned long cfr;
        int i;

        __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

        for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (cfr & bit)
                        report_one_hwcap(printed, crypto_hwcaps[i]);
        }
}

static void __init report_hwcaps(unsigned long caps)
{
        int i, printed = 0;

        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit))
                        report_one_hwcap(&printed, hwcaps[i]);
        }
        if (caps & HWCAP_SPARC_CRYPTO)
                report_crypto_hwcaps(&printed);
        if (printed != 0)
                printk(KERN_CONT "]\n");
}

static unsigned long __init mdesc_cpu_hwcap_list(void)
{
        struct mdesc_handle *hp;
        unsigned long caps = 0;
        const char *prop;
        int len;
        u64 pn;

        hp = mdesc_grab();
        if (!hp)
                return 0;

        pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
        if (pn == MDESC_NODE_NULL)
                goto out;

        prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
        if (!prop)
                goto out;

        while (len) {
                int i, plen;

                for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                        unsigned long bit = 1UL << i;

                        if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
                                caps |= bit;
                                break;
                        }
                }
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        if (!strcmp(prop, crypto_hwcaps[i]))
                                caps |= HWCAP_SPARC_CRYPTO;
                }

                plen = strlen(prop) + 1;
                prop += plen;
                len -= plen;
        }

out:
        mdesc_release(hp);
        return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
        unsigned long cap = sparc64_elf_hwcap;
        unsigned long mdesc_caps;

        if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cap |= HWCAP_SPARC_ULTRA3;
        else if (tlb_type == hypervisor) {
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }

        cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

        mdesc_caps = mdesc_cpu_hwcap_list();
        if (!mdesc_caps) {
                if (tlb_type == spitfire)
                        cap |= AV_SPARC_VIS;
                if (tlb_type == cheetah || tlb_type == cheetah_plus)
                        cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
                if (tlb_type == cheetah_plus) {
                        unsigned long impl, ver;

                        __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
                        impl = ((ver >> 32) & 0xffff);
                        if (impl == PANTHER_IMPL)
                                cap |= AV_SPARC_POPC;
                }
                if (tlb_type == hypervisor) {
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
                                cap |= AV_SPARC_ASI_BLK_INIT;
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
                                        AV_SPARC_POPC);
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
                }
        }
        sparc64_elf_hwcap = cap | mdesc_caps;

        report_hwcaps(sparc64_elf_hwcap);

        if (sparc64_elf_hwcap & AV_SPARC_POPC)
                popc_patch();
        if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
                pause_patch();
}

void __init alloc_irqstack_bootmem(void)
{
        unsigned int i, node;

        for_each_possible_cpu(i) {
                node = cpu_to_node(i);

                softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
                                                       THREAD_SIZE, node);
                if (!softirq_stack[i])
                        panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
                              __func__, THREAD_SIZE, THREAD_SIZE, node);
                hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
                                                       THREAD_SIZE, node);
                if (!hardirq_stack[i])
                        panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
                              __func__, THREAD_SIZE, THREAD_SIZE, node);
        }
}

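/* Top-level architecture setup: fetch the PROM boot arguments, register the
 * early PROM console, resolve the root device and ramdisk flags, set up the
 * boot cpu's trap block, then do paging_init(), ELF hwcap detection and the
 * per-cpu IRQ stack allocations.
 */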
void __init setup_arch(char **cmdline_p)
{
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();

        boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
        if (btext_find_display())
#endif
                register_console(&prom_early_console);

        if (tlb_type == hypervisor)
                pr_info("ARCH: SUN4V\n");
        else
                pr_info("ARCH: SUN4U\n");

        idprom_init();

        if (!root_flags)
                root_mountflags &= ~MS_RDONLY;
        ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif

#ifdef CONFIG_IP_PNP
        if (!ic_set_manually) {
                phandle chosen = prom_finddevice("/chosen");
                u32 cl, sv, gw;

                cl = prom_getintdefault (chosen, "client-ip", 0);
                sv = prom_getintdefault (chosen, "server-ip", 0);
                gw = prom_getintdefault (chosen, "gateway-ip", 0);
                if (cl && sv) {
                        ic_myaddr = cl;
                        ic_servaddr = sv;
                        if (gw)
                                ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
                        ic_proto_enabled = 0;
#endif
                }
        }
#endif

        /* Get boot processor trap_block[] setup. */
        init_cur_cpu_trap(current_thread_info());

        paging_init();
        init_sparc64_elf_hwcap();
        smp_fill_in_cpu_possible_map();
        /*
         * Once the OF device tree and MDESC have been setup and nr_cpus has
         * been parsed, we know the list of possible cpus. Therefore we can
         * allocate the IRQ stacks.
         */
        alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

void sun_do_break(void)
{
        if (!stop_a_enabled)
                return;

        prom_printf("\n");
        flush_user_windows();

        prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);