cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kup.h (10196B)
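
This header is arch/powerpc/include/asm/book3s/64/kup.h, the Book3S-64 implementation of Kernel Userspace Access/Execution Prevention (KUAP/KUEP). It uses the AMR and IAMR special-purpose registers to block kernel loads/stores from, and instruction fetches of, user memory outside explicitly opened access windows.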


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

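/*
 * Each protection key owns a 2-bit field in the AMR/IAMR. In the masks
 * below, the repeating 0xa8aa... pattern sets the write-disable bit of
 * each pair and the repeating 0x5455... pattern sets the read-disable
 * bit (note that one key's field is left clear in each mask).
 * AMR_KUAP_BLOCKED blocks both directions, and AMR_KUEP_BLOCKED is
 * written to the IAMR to block instruction fetch from user mappings.
 */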
#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

#ifdef __ASSEMBLY__

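/*
 * On return to userspace, reload the user AMR/IAMR values saved in the
 * stack frame at interrupt entry. The whole sequence is skipped via a
 * feature section when MMU_FTR_PKEY is not set, since without pkeys the
 * kernel never changes these registers behind userspace's back.
 */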
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, do the mtspr
	 * only if the AMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, do the mtspr
	 * only if the IAMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

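/*
 * On return to kernel space, restore the AMR saved at interrupt entry.
 * Since the kernel normally runs with the same (blocked) AMR value, a
 * compare is done first and the mtspr is skipped when nothing changed.
 * Only active when MMU_FTR_BOOK3S_KUAP is set.
 */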
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see __kuap_kernel_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
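/*
 * Debug-only sanity check (CONFIG_PPC_KUAP_DEBUG): trap and WARN once
 * if the AMR does not hold AMR_KUAP_BLOCKED, i.e. if some kernel path
 * left user access unlocked.
 */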
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif

/*
 *	if (pkey) {
 *
 *		save AMR -> stack;
 *		if (kuap) {
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *		if (from_user) {
 *			save IAMR -> stack;
 *			if (kuep) {
 *				KUEP_BLOCKED -> IAMR;
 *			}
 *		}
 *		return;
 *	}
 *
 *	if (kuap) {
 *		if (from_kernel) {
 *			save AMR -> stack;
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *
 *	}
 */
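/*
 * \use_cr names a scratch CR field for the AMR comparison below;
 * \msr_pr_cr, when non-blank, is a CR field already holding the result
 * of the MSR_PR test (did we interrupt userspace or the kernel?).
 */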
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * If both pkey and kuap are disabled, there is nothing to do.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * If pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel,
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled, or pkey is disabled but we are entering
	 * from the kernel.
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Update the kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * If entering from the kernel, we don't need to save IAMR.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

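/*
 * When this key is enabled, do_uaccess_flush() flushes the L1 data
 * cache whenever user access is blocked again (the uaccess-flush
 * mitigation); see prevent_user_access() and friends below.
 */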
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * A kernel thread that accesses userspace via kthread_use_mm() should
 * arguably inherit the AMR of the address space it operates on. But
 * the AMR is thread-specific, and what is inherited is the address
 * space, not another thread's access restrictions. Because of this,
 * ignore the saved AMR/IAMR and fall back to the defaults when
 * accessing userspace from a kernel thread.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static __always_inline bool kuap_is_disabled(void)
{
	return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
}

static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 *
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long amr = mfspr(SPRN_AMR);

	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
	return amr;
}

/* Do nothing, book3s/64 does that in ASM */
static inline void __kuap_lock(void)
{
}

static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static __always_inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 */
	/*
	 * We do have an exception table entry, but accessing
	 * userspace results in a fault. This could be because we
	 * didn't unlock the AMR, or because access is denied by a
	 * userspace key value that blocks it. We are only interested
	 * in catching the case of accessing without unlocking the
	 * AMR, hence the check for BLOCK_WRITE/READ against AMR.
	 */
	if (is_write) {
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	}
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static __always_inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
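
/*
 * Typical call pattern (an illustrative sketch, not code from this
 * header): a user-copy fast path opens and closes an access window,
 *
 *	allow_user_access(to, from, size, KUAP_READ_WRITE);
 *	... copy to/from user memory ...
 *	prevent_user_access(KUAP_READ_WRITE);
 *
 * while code that must temporarily re-lock an open window pairs
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... user access blocked here ...
 *	restore_user_access(flags);
 */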
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */