inventory.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inventory.c
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>

/*
** Debug options
**    DEBUG_PAT  Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;


void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:		/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 Gb, who report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs.  This view will cause an invalid
** argument error for all other cell module types.
184** 185*/ 186 187static int __init 188pat_query_module(ulong pcell_loc, ulong mod_index) 189{ 190 pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; 191 unsigned long bytecnt; 192 unsigned long temp; /* 64-bit scratch value */ 193 long status; /* PDC return value status */ 194 struct parisc_device *dev; 195 196 pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); 197 if (!pa_pdc_cell) 198 panic("couldn't allocate memory for PDC_PAT_CELL!"); 199 200 /* return cell module (PA or Processor view) */ 201 status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, 202 PA_VIEW, pa_pdc_cell); 203 204 if (status != PDC_OK) { 205 /* no more cell modules or error */ 206 kfree(pa_pdc_cell); 207 return status; 208 } 209 210 temp = pa_pdc_cell->cba; 211 dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path)); 212 if (!dev) { 213 kfree(pa_pdc_cell); 214 return PDC_OK; 215 } 216 217 /* alloc_pa_dev sets dev->hpa */ 218 219 /* 220 ** save parameters in the parisc_device 221 ** (The idea being the device driver will call pdc_pat_cell_module() 222 ** and store the results in its own data structure.) 223 */ 224 dev->pcell_loc = pcell_loc; 225 dev->mod_index = mod_index; 226 227 /* save generic info returned from the call */ 228 /* REVISIT: who is the consumer of this? not sure yet... */ 229 dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */ 230 dev->pmod_loc = pa_pdc_cell->mod_location; 231 dev->mod0 = pa_pdc_cell->mod[0]; 232 233 register_parisc_device(dev); /* advertise device */ 234 235#ifdef DEBUG_PAT 236 /* dump what we see so far... */ 237 switch (PAT_GET_ENTITY(dev->mod_info)) { 238 pdc_pat_cell_mod_maddr_block_t io_pdc_cell; 239 unsigned long i; 240 241 case PAT_ENTITY_PROC: 242 printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n", 243 pa_pdc_cell->mod[0]); 244 break; 245 246 case PAT_ENTITY_MEM: 247 printk(KERN_DEBUG 248 "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n", 249 pa_pdc_cell->mod[0], pa_pdc_cell->mod[1], 250 pa_pdc_cell->mod[2]); 251 break; 252 case PAT_ENTITY_CA: 253 printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc); 254 break; 255 256 case PAT_ENTITY_PBC: 257 printk(KERN_DEBUG "PAT_ENTITY_PBC: "); 258 goto print_ranges; 259 260 case PAT_ENTITY_SBA: 261 printk(KERN_DEBUG "PAT_ENTITY_SBA: "); 262 goto print_ranges; 263 264 case PAT_ENTITY_LBA: 265 printk(KERN_DEBUG "PAT_ENTITY_LBA: "); 266 267 print_ranges: 268 pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index, 269 IO_VIEW, &io_pdc_cell); 270 printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]); 271 for (i = 0; i < pa_pdc_cell->mod[1]; i++) { 272 printk(KERN_DEBUG 273 " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 274 i, pa_pdc_cell->mod[2 + i * 3], /* type */ 275 pa_pdc_cell->mod[3 + i * 3], /* start */ 276 pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */ 277 printk(KERN_DEBUG 278 " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 279 i, io_pdc_cell.mod[2 + i * 3], /* type */ 280 io_pdc_cell.mod[3 + i * 3], /* start */ 281 io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */ 282 } 283 printk(KERN_DEBUG "\n"); 284 break; 285 } 286#endif /* DEBUG_PAT */ 287 288 kfree(pa_pdc_cell); 289 290 return PDC_OK; 291} 292 293 294/* pat pdc can return information about a variety of different 295 * types of memory (e.g. firmware,i/o, etc) but we only care about 296 * the usable physical ram right now. Since the firmware specific 297 * information is allocated on the stack, we'll be generous, in 298 * case there is a lot of other information we don't care about. 
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note: Prelude (and its successors: Lclass, A400/500) only
	** implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
						  &module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	pa_serialize_tlb_flushes = machine_has_merced_bus();
	if (pa_serialize_tlb_flushes)
		pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif

#if defined(CONFIG_FW_CFG_SYSFS)
	if (running_on_qemu) {
		struct resource res[3] = {0,};
		unsigned int base;

		base = ((unsigned long long) PAGE0->pad0[2] << 32)
			| PAGE0->pad0[3]; /* SeaBIOS stored it here */

		res[0].name = "fw_cfg";
		res[0].start = base;
		res[0].end = base + 8 - 1;
		res[0].flags = IORESOURCE_MEM;

		res[1].name = "ctrl";
		res[1].start = 0;
		res[1].flags = IORESOURCE_REG;

		res[2].name = "data";
		res[2].start = 4;
		res[2].flags = IORESOURCE_REG;

		if (base) {
			pr_info("Found qemu fw_cfg interface at %#08x\n", base);
			platform_device_register_simple("fw_cfg",
					PLATFORM_DEVID_NONE, res, 3);
		}
	}
#endif
}