// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
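
/*
 * Note on the two helpers above: gprs[] is the register block consumed
 * by rdmsr_safe_regs()/wrmsr_safe_regs() - gprs[0]/gprs[2] carry
 * EAX/EDX (the MSR value), gprs[1] lands in ECX (the MSR number) and
 * gprs[7] in EDI. The 0x9c5a203a constant in EDI is the "password" K8
 * requires before it lets these model-specific registers be accessed.
 */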

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20 - PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2 - d;

		if (d > 20 * K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0x0000FFFF) == 0) {
			unsigned long flags;

			l = (1 << 0) | ((mbytes / 4) << 1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	    c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l & 0xFFFF0000) == 0) {
			unsigned long flags;

			l = ((mbytes >> 2) << 22) | (1 << 16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
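
/*
 * Worked example for the WHCR encodings above (illustrative): with
 * 256 MB of RAM the old-style layout keeps everything in the low word,
 * (1 << 0) | ((256 / 4) << 1) == 0x81, while the new-style layout uses
 * the high word, ((256 >> 2) << 22) | (1 << 16) == 0x10010000.
 */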

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff) | 0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff) | 0x20000000, h);
		}
	}

	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->cpu_die_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf 0xB is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish
 * the cores. Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits) - 1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}
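
/*
 * Worked example for amd_detect_cmp(): with x86_coreid_bits == 2 and an
 * initial APIC ID of 0b1101, the core ID comes out as 0b01 (the low two
 * bits) and the socket ID as 0b11 (the remaining high bits).
 */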

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = get_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
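
/*
 * Worked example for early_init_amd_mc(): CPUID Fn8000_0008 ECX =
 * 0x00003007 means eight cores (ECX[7:0] = 7, plus one) and an APIC
 * core ID size (ECX[15:12]) of 3, so x86_coreid_bits ends up as 3.
 */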

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *            the SME physical address space reduction value.
	 *            If BIOS has not enabled SME then don't advertise the
	 *            SME feature (set in scattered.c).
	 *            If the kernel has not enabled SME via any means then
	 *            don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *            SEV, SEV_ES and SEV_SNP features.
	 *
	 * In all cases, since support for SME and SEV requires long mode,
	 * don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32 bits, this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}
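
/*
 * Example of the adjustment in early_detect_mem_encrypt() above: CPUID
 * Fn8000_001F EBX[11:6] reports the physical address space reduction
 * that applies once memory encryption is enabled (EBX[5:0] is the
 * C-bit position), so a part reporting a reduction of 1 with 48
 * address bits ends up with x86_phys_bits == 47.
 */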

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}
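
/*
 * Decoding aid for the stepping check in init_amd_k8() below:
 * cpuid_eax(1) packs stepping in bits [3:0], model in [7:4] and family
 * in [11:8], so the 0x0f48 lower bound reads as family 0xf, model 4,
 * stepping 8.
 */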

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);
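
/*
 * Usage note: booting with "rdrand=force" on the kernel command line
 * sets rdrand_force above, which makes clear_rdrand_cpuid_bit() below
 * bail out early and keep RDRAND advertised even on parts whose BIOS
 * may not restore it properly across suspend/resume.
 */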

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
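
/*
 * Note on the 0x1E constant in init_amd_bd() above: it covers bits
 * [4:1] of MSR_F15H_IC_CFG, which per the comment there is the field
 * that disables the way access filter on the affected models.
 */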

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
	 * Always set it, except when running under a hypervisor.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4: init_amd_k5(c); break;
	case 5: init_amd_k6(c); break;
	case 6: init_amd_k7(c); break;
	case 0xf: init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: fallthrough;
	case 0x19: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif
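
/*
 * Decode notes for cpu_detect_tlb_amd() below: CPUID Fn8000_0006
 * EBX[27:16] holds the L2 dTLB 4K entry count and EBX[11:0] the L2
 * iTLB 4K count, with EAX carrying the 2M/4M counts. Family 0xf has
 * no 2M/4M entries in its L2 TLB, hence the fallback to the L1 figures
 * from Fn8000_0005 with the narrower 8-bit mask.
 */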

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
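
/*
 * Worked example: AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf) packs to
 * 0x170002ff, so AMD_MODEL_RANGE_FAMILY() extracts 0x17 and the
 * model/stepping window runs from ms = 0x000 to ms = 0x2ff - models
 * 0x00-0x2f at any stepping, given ms = (model << 4) | stepping.
 */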

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);