cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dt_cpu_ftrs.c (27652B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright 2017, Nicholas Piggin, IBM Corporation
      4 */
      5
      6#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
      7
      8#include <linux/export.h>
      9#include <linux/init.h>
     10#include <linux/jump_label.h>
     11#include <linux/libfdt.h>
     12#include <linux/memblock.h>
     13#include <linux/of_fdt.h>
     14#include <linux/printk.h>
     15#include <linux/sched.h>
     16#include <linux/string.h>
     17#include <linux/threads.h>
     18
     19#include <asm/cputable.h>
     20#include <asm/dt_cpu_ftrs.h>
     21#include <asm/mce.h>
     22#include <asm/mmu.h>
     23#include <asm/setup.h>
     24
     25
     26/* Device-tree visible constants follow */
     27#define ISA_V3_0B       3000
     28#define ISA_V3_1        3100
     29
     30#define USABLE_PR               (1U << 0)
     31#define USABLE_OS               (1U << 1)
     32#define USABLE_HV               (1U << 2)
     33
     34#define HV_SUPPORT_HFSCR        (1U << 0)
     35#define OS_SUPPORT_FSCR         (1U << 0)
     36
     37/* For parsing, we define all bits set as "NONE" case */
     38#define HV_SUPPORT_NONE		0xffffffffU
     39#define OS_SUPPORT_NONE		0xffffffffU
     40
/*
 * One "ibm,powerpc-cpu-features" device tree subnode, parsed into a
 * flat record. The first group mirrors DT properties; the trailing
 * fields track parse/enable state for dependency resolution.
 */
struct dt_cpu_feature {
	const char *name;		/* subnode name, identifies the feature */
	uint32_t isa;			/* ISA level from the "isa" property */
	uint32_t usable_privilege;	/* mask of USABLE_{PR,OS,HV} */
	uint32_t hv_support;		/* HV enable recipe, or HV_SUPPORT_NONE */
	uint32_t os_support;		/* OS enable recipe, or OS_SUPPORT_NONE */
	uint32_t hfscr_bit_nr;		/* HFSCR facility bit, -1 if none */
	uint32_t fscr_bit_nr;		/* FSCR facility bit, -1 if none */
	uint32_t hwcap_bit_nr;		/* user hwcap bit (word 0/1), -1 if none */
	/* fdt parsing */
	unsigned long node;		/* FDT offset of this subnode */
	int enabled;			/* processed and enabled */
	int disabled;			/* processed and rejected (or dep failed) */
};
     55
     56#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
     57
     58#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
     59				 PPC_FEATURE_ARCH_2_06 |\
     60				 PPC_FEATURE_ICACHE_SNOOP)
     61#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
     62				 PPC_FEATURE2_ISEL)
     63/*
     64 * Set up the base CPU
     65 */
     66
/* Nonzero if the boot CPU came up with MSR[HV] set (bare-metal). */
static int hv_mode;

/*
 * Snapshot of key SPRs taken in cpufeatures_setup_finished(), replayed
 * onto secondary CPUs by __restore_cpu_cpufeatures().
 */
static struct {
	u64	lpcr;
	u64	hfscr;
	u64	fscr;
	u64	pcr;
} system_registers;

/* PMU init hook selected by the performance-monitor-* features. */
static void (*init_pmu_registers)(void);
     77
/*
 * Re-apply the SPR state captured in system_registers to the current
 * CPU. Installed as .cpu_restore, so secondaries come up matching the
 * boot CPU's configuration.
 */
static void __restore_cpu_cpufeatures(void)
{
	mtspr(SPRN_LPCR, system_registers.lpcr);
	if (hv_mode) {
		/* HV-only SPRs: reset partition state, then facilities. */
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
		mtspr(SPRN_PCR, system_registers.pcr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	/* Re-run PMU register setup if a PMU feature selected one. */
	if (init_pmu_registers)
		init_pmu_registers();
}
     92
/* CPU name copied from the DT "display-name" property, if present. */
static char dt_cpu_name[64];

/*
 * Minimal baseline cpu_spec installed before the feature scan;
 * individual features OR additional capabilities on top of this.
 */
static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_DT_CPU_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init.             */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.oprofile_cpu_type	= NULL,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.machine_check_early	= NULL,
	.platform		= NULL,
};
    111
/*
 * Install base_cpu_spec as cur_cpu_spec and put the boot CPU into a
 * known baseline state (facilities disabled) before features are
 * parsed and selectively enabled.
 */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	/* Match any PVR: the DT, not a PVR table, describes this CPU. */
	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);
	mtspr(SPRN_PCR, PCR_MASK);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
    134
/*
 * Best-effort enable for a feature with no match-table handler. Only
 * works when the DT describes a standard recipe (HFSCR/FSCR enable
 * bits or nothing to do); returns 0 when no known recipe exists.
 */
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
		/* No HV-level action required. */
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
		/* No OS-level action required. */
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	/* Advertise to userspace: word 0 = AT_HWCAP, word 1 = AT_HWCAP2. */
	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
    171
/*
 * Generic enable: set the feature's HFSCR/FSCR facility bits (if any)
 * and advertise its hwcap bit to userspace. Bit numbers of -1 mean
 * "no bit to set". Always reports success.
 */
static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	/* Advertise to userspace: word 0 = AT_HWCAP, word 1 = AT_HWCAP2. */
	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
    204
/* Table hook that unconditionally rejects a feature. */
static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
    209
/*
 * "hypervisor" feature: the kernel runs bare-metal. Resets partition
 * state and routes external interrupts to HV mode; fails if the CPU
 * did not actually boot with MSR[HV] set.
 */
static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_AMOR, ~0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &=  ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}
    230
/* Advertise native ("true") little-endian execution to userspace. */
static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}
    236
    237static int __init feat_enable_smt(struct dt_cpu_feature *f)
    238{
    239	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
    240	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
    241	return 1;
    242}
    243
    244static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
    245{
    246	u64 lpcr;
    247
    248	/* Set PECE wakeup modes for ISA 207 */
    249	lpcr = mfspr(SPRN_LPCR);
    250	lpcr |=  LPCR_PECE0;
    251	lpcr |=  LPCR_PECE1;
    252	lpcr |=  LPCR_PECE2;
    253	mtspr(SPRN_LPCR, lpcr);
    254
    255	return 1;
    256}
    257
    258static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
    259{
    260	u64 lpcr;
    261
    262	/* Set PECE wakeup modes for ISAv3.0B */
    263	lpcr = mfspr(SPRN_LPCR);
    264	lpcr |=  LPCR_PECE0;
    265	lpcr |=  LPCR_PECE1;
    266	lpcr |=  LPCR_PECE2;
    267	mtspr(SPRN_LPCR, lpcr);
    268
    269	return 1;
    270}
    271
/* Configure the pre-ISAv3 hash MMU via LPCR and advertise it. */
static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Kernel built without hash MMU support cannot use this. */
	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
    293
/* Configure the ISAv3 hash MMU: clear ISL/UPRT/HR to select hash mode. */
static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Kernel built without hash MMU support cannot use this. */
	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
    310
    311
    312static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
    313{
    314	if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
    315		return 0;
    316
    317	cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
    318	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
    319	cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
    320	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
    321
    322	return 1;
    323}
    324
/*
 * Data Stream Control Register: enable the facility but deliberately
 * leave FSCR[DSCR] clear, and set a default prefetch depth in LPCR.
 */
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * Linux relies on FSCR[DSCR] being clear, so that we can take the
	 * facility unavailable interrupt and track the task's usage of DSCR.
	 * See facility_unavailable_exception().
	 * Clear the bit here so that feat_enable() doesn't set it.
	 */
	f->fscr_bit_nr = -1;

	feat_enable(f);

	/* Default Prefetch Depth = 4. */
	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |=  (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
    346
/*
 * Grant OS access to the performance monitor facility.
 * PPC_BIT(60) is in IBM bit numbering — NOTE(review): presumably
 * HFSCR[PM]; confirm against the ISA HFSCR bit layout.
 */
static void __init hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}
    353
/*
 * Put the POWER8 PMU into a quiescent state with counters frozen.
 * Also installed as the init_pmu_registers restore hook.
 */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		/* HV-only PMU control registers. */
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC); /* freeze counters */
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
    367
    368static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
    369{
    370	cur_cpu_spec->platform = "power8";
    371	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
    372
    373	return 1;
    374}
    375
/*
 * POWER8 PMU: grant facility access, reset the PMU, register the
 * restore hook, and describe the counter layout to the perf code.
 */
static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	/* POWER8E parts need the PMAO bug workaround. */
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power8";

	return 1;
}
    394
/*
 * Put the POWER9 PMU into a quiescent state with counters frozen.
 * Also installed as the init_pmu_registers restore hook.
 */
static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0); /* HV-only PMU control register */

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC); /* freeze counters */
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}
    405
    406static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
    407{
    408	cur_cpu_spec->platform = "power9";
    409	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
    410
    411	return 1;
    412}
    413
/*
 * POWER9 PMU: grant facility access, reset the PMU, register the
 * restore hook, and describe the counter layout to the perf code.
 */
static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs		= 6;
	cur_cpu_spec->pmc_type		= PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type	= "ppc64/power9";

	return 1;
}
    430
/*
 * POWER10 PMU baseline: start from the POWER9 reset sequence, then
 * clear MMCR3 and apply the POWER10-specific MMCRA/MMCR0 settings.
 */
static void init_pmu_power10(void)
{
	init_pmu_power10();

	mtspr(SPRN_MMCR3, 0);
	mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
    439
/*
 * POWER10 PMU: grant facility access, reset the PMU, register the
 * restore hook, and describe the counter layout to the perf code.
 */
static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power10();
	init_pmu_registers = init_pmu_power10;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs          = 6;
	cur_cpu_spec->pmc_type          = PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type = "ppc64/power10";

	return 1;
}
    456
    457static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
    458{
    459	cur_cpu_spec->platform = "power10";
    460	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;
    461
    462	return 1;
    463}
    464
    465static int __init feat_enable_tm(struct dt_cpu_feature *f)
    466{
    467#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    468	feat_enable(f);
    469	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
    470	return 1;
    471#endif
    472	return 0;
    473}
    474
/* Floating point: generic enable, then mark the FPU as available. */
static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}
    482
    483static int __init feat_enable_vector(struct dt_cpu_feature *f)
    484{
    485#ifdef CONFIG_ALTIVEC
    486	feat_enable(f);
    487	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
    488	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
    489	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
    490
    491	return 1;
    492#endif
    493	return 0;
    494}
    495
    496static int __init feat_enable_vsx(struct dt_cpu_feature *f)
    497{
    498#ifdef CONFIG_VSX
    499	feat_enable(f);
    500	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
    501	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
    502
    503	return 1;
    504#endif
    505	return 0;
    506}
    507
/* Processor (Scaled) Utilization of Resources registers (PURR/SPURR). */
static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}
    514
/* Event-based branch facility; userspace hwcap is left to PMU init. */
static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}
    528
/* Doorbell (processor control) facility; enable hyp doorbell wakeup. */
static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |=  LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
    544
/* Route interrupt-controller interrupts as HVI rather than EE. */
static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
    569
/* Large cache-inhibited pages supported by the MMU. */
static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}
    576
/*
 * Matrix-multiply assist (ISA 3.1): generic enable, plus clear the
 * PCR bit that would otherwise keep MMA disabled.
 */
static int __init feat_enable_mma(struct dt_cpu_feature *f)
{
	u64 pcr;

	feat_enable(f);
	pcr = mfspr(SPRN_PCR);
	pcr &= ~PCR_MMA_DIS;
	mtspr(SPRN_PCR, pcr);

	return 1;
}
    588
/* Maps a DT feature node name to its enable handler and CPU_FTR bits. */
struct dt_cpu_feature_match {
	const char *name;	/* DT subnode name to match */
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;	/* CPU_FTR_* bits set on successful enable */
};
    594
    595static struct dt_cpu_feature_match __initdata
    596		dt_cpu_feature_match_table[] = {
    597	{"hypervisor", feat_enable_hv, 0},
    598	{"big-endian", feat_enable, 0},
    599	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
    600	{"smt", feat_enable_smt, 0},
    601	{"interrupt-facilities", feat_enable, 0},
    602	{"system-call-vectored", feat_enable, 0},
    603	{"timer-facilities", feat_enable, 0},
    604	{"timer-facilities-v3", feat_enable, 0},
    605	{"debug-facilities", feat_enable, 0},
    606	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
    607	{"branch-tracing", feat_enable, 0},
    608	{"floating-point", feat_enable_fp, 0},
    609	{"vector", feat_enable_vector, 0},
    610	{"vector-scalar", feat_enable_vsx, 0},
    611	{"vector-scalar-v3", feat_enable, 0},
    612	{"decimal-floating-point", feat_enable, 0},
    613	{"decimal-integer", feat_enable, 0},
    614	{"quadword-load-store", feat_enable, 0},
    615	{"vector-crypto", feat_enable, 0},
    616	{"mmu-hash", feat_enable_mmu_hash, 0},
    617	{"mmu-radix", feat_enable_mmu_radix, 0},
    618	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
    619	{"virtual-page-class-key-protection", feat_enable, 0},
    620	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
    621	{"transactional-memory-v3", feat_enable_tm, 0},
    622	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
    623	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
    624	{"idle-nap", feat_enable_idle_nap, 0},
    625	/* alignment-interrupt-dsisr ignored */
    626	{"idle-stop", feat_enable_idle_stop, 0},
    627	{"machine-check-power8", feat_enable_mce_power8, 0},
    628	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
    629	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
    630	{"event-based-branch", feat_enable_ebb, 0},
    631	{"target-address-register", feat_enable, 0},
    632	{"branch-history-rolling-buffer", feat_enable, 0},
    633	{"control-register", feat_enable, CPU_FTR_CTRL},
    634	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
    635	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
    636	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
    637	{"no-execute", feat_enable, 0},
    638	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
    639	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
    640	{"coprocessor-icswx", feat_enable, 0},
    641	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
    642	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
    643	{"wait", feat_enable, 0},
    644	{"atomic-memory-operations", feat_enable, 0},
    645	{"branch-v3", feat_enable, 0},
    646	{"copy-paste", feat_enable, 0},
    647	{"decimal-floating-point-v3", feat_enable, 0},
    648	{"decimal-integer-v3", feat_enable, 0},
    649	{"fixed-point-v3", feat_enable, 0},
    650	{"floating-point-v3", feat_enable, 0},
    651	{"group-start-register", feat_enable, 0},
    652	{"pc-relative-addressing", feat_enable, 0},
    653	{"machine-check-power9", feat_enable_mce_power9, 0},
    654	{"machine-check-power10", feat_enable_mce_power10, 0},
    655	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
    656	{"performance-monitor-power10", feat_enable_pmu_power10, 0},
    657	{"event-based-branch-v3", feat_enable, 0},
    658	{"random-number-generator", feat_enable, 0},
    659	{"system-call-vectored", feat_disable, 0},
    660	{"trace-interrupt-v3", feat_enable, 0},
    661	{"vector-v3", feat_enable, 0},
    662	{"vector-binary128", feat_enable, 0},
    663	{"vector-binary16", feat_enable, 0},
    664	{"wait-v3", feat_enable, 0},
    665	{"prefix-instructions", feat_enable, 0},
    666	{"matrix-multiply-assist", feat_enable_mma, 0},
    667	{"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1},
    668};
    669
/* Set once dt_cpu_ftrs_init() finds and accepts the cpufeatures node. */
static bool __initdata using_dt_cpu_ftrs;
/* Cleared by "dt_cpu_ftrs=known" to reject unrecognized features. */
static bool __initdata enable_unknown = true;
    672
    673static int __init dt_cpu_ftrs_parse(char *str)
    674{
    675	if (!str)
    676		return 0;
    677
    678	if (!strcmp(str, "off"))
    679		using_dt_cpu_ftrs = false;
    680	else if (!strcmp(str, "known"))
    681		enable_unknown = false;
    682	else
    683		return 1;
    684
    685	return 0;
    686}
    687early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
    688
/*
 * Apply the base architecture level from the node's "isa" property
 * (3000 = ISA v3.0B, 3100 = ISA v3.1) before individual features are
 * processed.
 */
static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= ISA_V3_0B) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}

	if (isa >= ISA_V3_1) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
	}
}
    703
/*
 * Try to enable one parsed feature. Known features run their match
 * table handler (first name match wins); unknown ones fall back to
 * feat_try_enable_unknown() unless "dt_cpu_ftrs=known" was given.
 * Returns true when the feature was enabled.
 */
static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f)) {
				/* Handler succeeded: set associated CPU_FTR bits. */
				cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
				break;
			}

			/* Handler refused the feature (e.g. not configured in). */
			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
		pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
			f->name);
		return false;
	}

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}
    738
/*
 * Handle POWER9 broadcast tlbie invalidation issue using
 * cpu feature flag.
 */
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
	if (PVR_VER(pvr) == PVR_POWER9) {
		/*
		 * Set the tlbie feature flag for anything below
		 * Nimbus DD 2.3 and Cumulus DD 1.3
		 */
		if ((pvr & 0xe000) == 0) {
			/* Nimbus */
			if ((pvr & 0xfff) < 0x203)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else if ((pvr & 0xc000) == 0) {
			/* Cumulus */
			if ((pvr & 0xfff) < 0x103)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else {
			/* Unrecognized chip class: assume the workaround is needed. */
			WARN_ONCE(1, "Unknown PVR");
			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		}

		/* Set unconditionally for every POWER9 revision. */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
	}
}
    766
/* PVR-based quirks that the cpufeatures device tree cannot express. */
static __init void cpufeatures_cpu_quirks(void)
{
	unsigned long version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 * 0x004e is the POWER9 PVR version; mask 0xffffefff ignores PVR
	 * bit 0x1000 so both chip variants match each revision check.
	 */
	if ((version & 0xffffefff) == 0x004e0200) {
		/* DD2.0 has no feature flag */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0201) {
		/* DD2.1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0202) {
		/* DD2.2 */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0203) {
		/* DD2.3 */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else if ((version & 0xffff0000) == 0x004e0000) {
		/* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	}

	/* Every POWER9 has the TIDR register. */
	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
	}

	update_tlbie_feature_flag(version);
}
    802
/*
 * Final pass after all features are processed: apply PVR quirks,
 * reconcile HV mode, and snapshot the SPRs for secondary CPU restore.
 */
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	/* Capture the SPR state that __restore_cpu_cpufeatures replays. */
	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);
	system_registers.pcr = mfspr(SPRN_PCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
    823
    824static int __init disabled_on_cmdline(void)
    825{
    826	unsigned long root, chosen;
    827	const char *p;
    828
    829	root = of_get_flat_dt_root();
    830	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
    831	if (chosen == -FDT_ERR_NOTFOUND)
    832		return false;
    833
    834	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
    835	if (!p)
    836		return false;
    837
    838	if (strstr(p, "dt_cpu_ftrs=off"))
    839		return true;
    840
    841	return false;
    842}
    843
    844static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
    845					int depth, void *data)
    846{
    847	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
    848	    && of_get_flat_dt_prop(node, "isa", NULL))
    849		return 1;
    850
    851	return 0;
    852}
    853
/* Report whether the cpufeatures DT binding is driving CPU setup. */
bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}
    858
/*
 * Decide whether the device tree cpufeatures binding will be used to
 * set up the CPU. Returns true (and installs the base cpu_spec) when a
 * compatible node with an "isa" property exists and the binding was not
 * disabled on the command line.
 */
bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
    878
/* Scratch array of parsed feature nodes, live only during the scan. */
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
    881
/*
 * Parse one cpufeatures subnode into dt_cpu_features[i] and validate
 * that its properties are consistent with its "usable-privilege" mask.
 * Features with no "dependencies" property are processed immediately;
 * dependent ones are handled later by cpufeatures_deps_enable().
 * Always returns 0 so the subnode walk continues.
 */
static int __init process_cpufeatures_node(unsigned long node,
					  const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];

	f->node = node;

	f->name = uname;

	/* "isa" and "usable-privilege" are mandatory. */
	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	/* Optional properties default to the NONE / -1 sentinels. */
	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;
	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	/* Cross-check HV-side properties against the privilege mask. */
	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	/* Same consistency checks for the OS (FSCR) side. */
	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	/* hwcap bits only make sense for user-visible features. */
	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}
    997
/*
 * Recursively enable f's dependencies (looked up by phandle), then f
 * itself. f is marked disabled if any dependency ends up disabled.
 * The enabled/disabled flags double as "visited" markers;
 * NOTE(review): there is no cycle guard, so a dependency cycle in the
 * DT would recurse without bound — assumed not to occur.
 */
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	/* Already resolved in an earlier pass or recursion. */
	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property", f->name);
		return;
	}

	/* The property is an array of 32-bit phandles. */
	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}
   1038
   1039static int __init scan_cpufeatures_subnodes(unsigned long node,
   1040					  const char *uname,
   1041					  void *data)
   1042{
   1043	int *count = data;
   1044
   1045	process_cpufeatures_node(node, uname, *count);
   1046
   1047	(*count)++;
   1048
   1049	return 0;
   1050}
   1051
   1052static int __init count_cpufeatures_subnodes(unsigned long node,
   1053					  const char *uname,
   1054					  void *data)
   1055{
   1056	int *count = data;
   1057
   1058	(*count)++;
   1059
   1060	return 0;
   1061}
   1062
/*
 * Main scan, run once for the "ibm,powerpc-cpu-features" node: count
 * subnodes, allocate the temporary feature array, parse and enable each
 * feature (independent ones first, then in dependency order), apply the
 * CPU display name, finalize quirks/register snapshots, free the array.
 */
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
	if (!dt_cpu_features)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__,
		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
		      PAGE_SIZE);

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	/* Use the DT-provided display name for the CPU, if any. */
	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	/* The parse scratch array is no longer needed. */
	memblock_free(dt_cpu_features,
		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);

	return 0;
}
   1117
   1118void __init dt_cpu_ftrs_scan(void)
   1119{
   1120	if (!using_dt_cpu_ftrs)
   1121		return;
   1122
   1123	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
   1124}