cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

paca.c (9248B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * c 2001 PPC 64 Team, IBM Corp
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
#include <linux/pgtable.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include <asm/rtas.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

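/*
 * Allocate early per-CPU paca-related data from memblock, below @limit and
 * node-local to @cpu where possible. Panics on failure, since boot cannot
 * continue without a paca.
 */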
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				unsigned long limit, int cpu)
{
	void *ptr;
	int nid;

	/*
	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
	 * Set bottom-up mode, because the boot CPU should be on node-0,
	 * which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = NUMA_NO_NODE;
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				     limit, nid);
	if (!ptr)
		panic("cannot allocate paca data");

	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return ptr;
}

#ifdef CONFIG_PPC_PSERIES

#define LPPACA_SIZE 0x400

static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
					int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		memblock_set_bottom_up(true);

		/*
		 * See Documentation/powerpc/ultravisor.rst for more details.
		 *
		 * UV/HV data sharing is in PAGE_SIZE granularity. In order to
		 * minimize the number of pages shared, align the allocation to
		 * PAGE_SIZE.
		 */
		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes at
	 * this point.
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned, and must
 * not cross a 4kB boundary. A 1kB size and 1kB alignment will satisfy
 * these requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
	BUILD_BUG_ON(sizeof(struct lppaca) != 640);

	*lppaca = (struct lppaca) {
		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
		.size = cpu_to_be16(LPPACA_SIZE),
		.fpregs_in_use = 1,
		.slb_count = cpu_to_be16(64),
		.vmxregs_in_use = 0,
		.page_ins = 0, };
}

static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
	struct lppaca *lp;

	BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return NULL;

	if (is_secure_guest())
		lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
	else
		lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

	init_lppaca(lp);

	return lp;
}
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_64S_HASH_MMU
/*
 * 3 persistent SLBs are allocated here.  The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
	struct slb_shadow *s;

	if (cpu != boot_cpuid) {
		/*
		 * The boot CPU comes here before early_radix_enabled
		 * is parsed (e.g., for disable_radix), so always allocate
		 * for it; this will be fixed up in free_unused_pacas().
		 */
		if (early_radix_enabled())
			return NULL;
	}

	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}
#endif /* CONFIG_PPC_64S_HASH_MMU */

#ifdef CONFIG_PPC_PSERIES
/**
 * new_rtas_args() - Allocates rtas args
 * @cpu:	CPU number
 * @limit:	Memory limit for this allocation
 *
 * Allocates a struct rtas_args and returns its pointer,
 * if not running in hypervisor mode.
 *
 * Return:	Pointer to allocated rtas_args
 *		NULL if CPU is in hypervisor mode
 */
static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit)
{
	limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return NULL;

	return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES,
			       limit, cpu);
}
#endif /* CONFIG_PPC_PSERIES */

/* The paca is an array with one entry per processor.  Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor.  The paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors.  The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

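/*
 * Fill in the boot-time defaults for a newly allocated paca. Pointers to
 * per-CPU structures (lppaca, SLB shadow, reentrant RTAS args) are cleared
 * here and set up later by allocate_paca().
 */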
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_64S_HASH_MMU
	new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif

#ifdef CONFIG_PPC_PSERIES
	new_paca->rtas_args_reentrant = NULL;
#endif
}

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we set up both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied.
	 *
	 * Normally you should test against CPU_FTR_HVMODE, but CPU features
	 * are not yet set up when we first reach here.
	 */
	if (mfmsr() & MSR_HV)
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

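/*
 * Allocate the array of paca pointers for all possible CPUs and fill it
 * with a 0x88 poison pattern, so that any use of an entry before
 * allocate_paca() has run shows up as an obviously bad pointer.
 */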
void __init allocate_paca_ptrs(void)
{
	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
	if (!paca_ptrs)
		panic("Failed to allocate %d bytes for paca pointers\n",
		      paca_ptrs_size);

	memset(paca_ptrs, 0x88, paca_ptrs_size);
}

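/*
 * Allocate and initialise the paca for one CPU, keeping it below the
 * real-mode accessible limit, and attach its lppaca, SLB shadow and
 * reentrant RTAS args as the kernel configuration requires.
 */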
void __init allocate_paca(int cpu)
{
	u64 limit;
	struct paca_struct *paca;

	BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
				limit, cpu);
	paca_ptrs[cpu] = paca;

	initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
	paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_64S_HASH_MMU
	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
#ifdef CONFIG_PPC_PSERIES
	paca->rtas_args_reentrant = new_rtas_args(cpu, limit);
#endif
	paca_struct_size += sizeof(struct paca_struct);
}

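/*
 * Once the final CPU count is known, return the unused tail of the
 * paca_ptrs array to memblock, and drop the boot CPU's SLB shadow if
 * radix turned out to be enabled (see the comment in new_slb_shadow()).
 */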
void __init free_unused_pacas(void)
{
	int new_ptrs_size;

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size)
		memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
				   paca_ptrs_size - new_ptrs_size);

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_64S_HASH_MMU
	if (early_radix_enabled()) {
		/* Ugly fixup, see new_slb_shadow() */
		memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
				   sizeof(struct slb_shadow));
		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
	}
#endif

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
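/*
 * Copy the address-space slice page-size arrays from the mm context into
 * the current CPU's paca, so the hash MMU fault paths can consult them
 * without dereferencing the mm.
 */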
void copy_mm_to_paca(struct mm_struct *mm)
{
	mm_context_t *context = &mm->context;

	VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
	memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
	       LOW_SLICE_ARRAY_SZ);
	memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
	       TASK_SLICE_ARRAY_SZ(context));
}
#endif /* CONFIG_PPC_64S_HASH_MMU */