cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

paravirt.c (9975B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Paravirtualization interfaces
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     ASM_ENDBR
     ASM_RET
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");

/* stub always returning 0. */
asm (".pushsection .entry.text, \"ax\"\n"
     ".global paravirt_ret0\n"
     "paravirt_ret0:\n\t"
     ASM_ENDBR
     "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
     ASM_RET
     ".size paravirt_ret0, . - paravirt_ret0\n\t"
     ".type paravirt_ret0, @function\n\t"
     ".popsection");


void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
noinstr void paravirt_BUG(void)
{
	BUG();
}

static unsigned paravirt_patch_call(void *insn_buff, const void *target,
				    unsigned long addr, unsigned len)
{
	__text_gen_insn(insn_buff, CALL_INSN_OPCODE,
			(void *)addr, target, CALL_INSN_SIZE);
	return CALL_INSN_SIZE;
}
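
/*
 * Illustrative sketch (not part of the original file): for the 5-byte
 * near call patched in above, __text_gen_insn() emits the 0xE8 CALL
 * opcode followed by a 32-bit displacement that is relative to the end
 * of the instruction, roughly:
 *
 *	u8 *buf = insn_buff;
 *	s32 rel = (s32)((unsigned long)target - (addr + CALL_INSN_SIZE));
 *
 *	buf[0] = CALL_INSN_OPCODE;		// 0xE8
 *	memcpy(buf + 1, &rel, sizeof(rel));	// rel32 displacement
 */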

#ifdef CONFIG_PARAVIRT_XXL
/* identity function, which can be inlined */
u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}
#endif

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
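
/*
 * For context, a simplified sketch of the consumer side (the real
 * check lives in the qspinlock code): the key stays enabled when
 * running under a hypervisor, in which case the guest falls back to a
 * simple test-and-set spinlock instead of queueing, since a lock
 * holder's vCPU may be preempted:
 *
 *	if (static_branch_likely(&virt_spin_lock_key))
 *		... spin on a test-and-set lock instead of queueing ...
 */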

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
			    unsigned int len)
{
	/*
	 * Neat trick to map patch type back to the call within the
	 * corresponding structure.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with paravirt_BUG() */
		ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
	else if (opfunc == _paravirt_nop)
		ret = 0;
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

	return ret;
}
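
/*
 * Worked example of the "neat trick" above (illustrative): patch types
 * are generated as PARAVIRT_PATCH(x) ==
 * offsetof(struct paravirt_patch_template, x) / sizeof(void *), i.e. a
 * pointer-sized slot index into pv_ops.  So for
 * type == PARAVIRT_PATCH(cpu.io_delay), the expression
 *
 *	*((void **)&pv_ops + type)
 *
 * reads back exactly pv_ops.cpu.io_delay.  The ret = 0 case emits
 * nothing and leaves the whole call site to be padded with NOPs by the
 * patching core.
 */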

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
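
/*
 * Usage sketch (my_hv_sched_clock and read_hv_time_ns are
 * hypothetical): a guest's early clock setup redirects the
 * pv_sched_clock static call away from native_sched_clock, e.g.
 *
 *	static u64 my_hv_sched_clock(void)
 *	{
 *		return read_hv_time_ns();
 *	}
 *
 *	paravirt_set_sched_clock(my_hv_sched_clock);
 */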

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
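
/*
 * Caller sketch (hypothetical): a fully-virtual platform would invoke
 * this from its early platform setup, before any legacy driver gets a
 * chance to probe:
 *
 *	if (paravirt_disable_iospace())
 *		pr_warn("paravirt: could not reserve legacy ioports\n");
 */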

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
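
/*
 * For context, a minimal sketch of the batching pattern these hooks
 * enable (arch_enter/leave_lazy_mmu_mode() route into
 * pv_ops.mmu.lazy_mode on PARAVIRT_XXL kernels): a hypervisor backend
 * may queue the PTE updates made in between and flush them as a single
 * batch on leave:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
 *	arch_leave_lazy_mmu_mode();
 */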

#ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

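/*
 * The pv_native_*() wrappers below exist because the native helpers
 * they forward to are normally inlined: the default pv_ops entries
 * need real, noinstr-qualified out-of-line symbols that are safe to
 * call from non-instrumentable (entry) code.
 */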
static noinstr unsigned long pv_native_read_cr2(void)
{
	return native_read_cr2();
}

static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
	native_set_debugreg(regno, val);
}

static noinstr void pv_native_irq_enable(void)
{
	native_irq_enable();
}

static noinstr void pv_native_irq_disable(void)
{
	native_irq_disable();
}
#endif

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}

struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)

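/*
 * On bare hardware these conversions are pure identities: a page table
 * entry's 64-bit value passes through _paravirt_ident_64() unchanged,
 * so e.g. pmd_val(pmd) simply yields pmd.pmd.  Hypervisor backends
 * (e.g. Xen PV) override them to translate between native and guest
 * views of the entries.
 */
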
struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay		= native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid		= native_cpuid,
	.cpu.get_debugreg	= pv_native_get_debugreg,
	.cpu.set_debugreg	= pv_native_set_debugreg,
	.cpu.read_cr0		= native_read_cr0,
	.cpu.write_cr0		= native_write_cr0,
	.cpu.write_cr4		= native_write_cr4,
	.cpu.wbinvd		= native_wbinvd,
	.cpu.read_msr		= native_read_msr,
	.cpu.write_msr		= native_write_msr,
	.cpu.read_msr_safe	= native_read_msr_safe,
	.cpu.write_msr_safe	= native_write_msr_safe,
	.cpu.read_pmc		= native_read_pmc,
	.cpu.load_tr_desc	= native_load_tr_desc,
	.cpu.set_ldt		= native_set_ldt,
	.cpu.load_gdt		= native_load_gdt,
	.cpu.load_idt		= native_load_idt,
	.cpu.store_tr		= native_store_tr,
	.cpu.load_tls		= native_load_tls,
	.cpu.load_gs_index	= native_load_gs_index,
	.cpu.write_ldt_entry	= native_write_ldt_entry,
	.cpu.write_gdt_entry	= native_write_gdt_entry,
	.cpu.write_idt_entry	= native_write_idt_entry,

	.cpu.alloc_ldt		= paravirt_nop,
	.cpu.free_ldt		= paravirt_nop,

	.cpu.load_sp0		= native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap	= native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap		= native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch	= paravirt_nop,
	.cpu.end_context_switch		= paravirt_nop,

	/* Irq ops. */
	.irq.save_fl		= __PV_IS_CALLEE_SAVE(native_save_fl),
	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt		= native_safe_halt,
	.irq.halt		= native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user	= native_flush_tlb_local,
	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
	.mmu.flush_tlb_multi	= native_flush_tlb_multi,
	.mmu.tlb_remove_table	=
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,

	.mmu.exit_mmap		= paravirt_nop,
	.mmu.notify_page_enc_status_changed	= paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2		= pv_native_write_cr2,
	.mmu.read_cr3		= __native_read_cr3,
	.mmu.write_cr3		= native_write_cr3,

	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
	.mmu.pgd_free		= paravirt_nop,

	.mmu.alloc_pte		= paravirt_nop,
	.mmu.alloc_pmd		= paravirt_nop,
	.mmu.alloc_pud		= paravirt_nop,
	.mmu.alloc_p4d		= paravirt_nop,
	.mmu.release_pte	= paravirt_nop,
	.mmu.release_pmd	= paravirt_nop,
	.mmu.release_pud	= paravirt_nop,
	.mmu.release_p4d	= paravirt_nop,

	.mmu.set_pte		= native_set_pte,
	.mmu.set_pmd		= native_set_pmd,

	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,

	.mmu.set_pud		= native_set_pud,

	.mmu.pmd_val		= PTE_IDENT,
	.mmu.make_pmd		= PTE_IDENT,

	.mmu.pud_val		= PTE_IDENT,
	.mmu.make_pud		= PTE_IDENT,

	.mmu.set_p4d		= native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val		= PTE_IDENT,
	.mmu.make_p4d		= PTE_IDENT,

	.mmu.set_pgd		= native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val		= PTE_IDENT,
	.mmu.pgd_val		= PTE_IDENT,

	.mmu.make_pte		= PTE_IDENT,
	.mmu.make_pgd		= PTE_IDENT,

	.mmu.dup_mmap		= paravirt_nop,
	.mmu.activate_mm	= paravirt_nop,

	.mmu.lazy_mode = {
		.enter		= paravirt_nop,
		.leave		= paravirt_nop,
		.flush		= paravirt_nop,
	},

	.mmu.set_fixmap		= native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock	=
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait			= paravirt_nop,
	.lock.kick			= paravirt_nop,
	.lock.vcpu_is_preempted		=
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
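
/*
 * Consumer-side sketch (simplified, not the literal macro expansion):
 * callers in asm/paravirt.h reach these slots through PVOP_* wrappers
 * that behave roughly like
 *
 *	pv_ops.mmu.flush_tlb_user();
 *
 * i.e. an indirect call through the slot, with the call site recorded
 * so that paravirt_patch() can rewrite it into a direct call to the
 * registered function during boot.
 */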

#ifdef CONFIG_PARAVIRT_XXL
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);