cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tprot.c (6889B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test TEST PROTECTION emulation.
 *
 * Copyright IBM Corp. 2021
 */

#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
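/*
 * Control-register bits are numbered from the most-significant bit on s390,
 * so CR0 bit 38 (fetch-protection override) and bit 39 (storage-protection
 * override) are expressed as shifts relative to bit 63.
 */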
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

#define VCPU_ID 1

static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];
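
/*
 * Key values encode the access-control bits in the high nibble and the
 * fetch-protection bit as 0x08: 0x10 is access-control key 1, 0x90 is
 * key 9, and 0x98 is key 9 with fetch protection enabled.  TEST PROTECTION
 * takes its access key from bits 56-59 of the second-operand address,
 * which is likewise the high nibble of the key values used below.
 */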

/* A nonzero return value indicates that the address is not mapped */
static int set_storage_key(void *addr, uint8_t key)
{
	int not_mapped = 0;

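	/*
	 * LRA probes whether addr is currently mapped; on condition code 0
	 * the translation succeeded and SSKE sets the storage key of the
	 * frame backing addr, otherwise not_mapped is set to 1.
	 */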
	asm volatile (
		       "lra	%[addr], 0(0,%[addr])\n"
		"	jz	0f\n"
		"	llill	%[not_mapped],1\n"
		"	j	1f\n"
		"0:	sske	%[key], %[addr]\n"
		"1:"
		: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
		: [key] "r" (key)
		: "cc"
	);
	return -not_mapped;
}

enum permission {
	READ_WRITE = 0,
	READ = 1,
	RW_PROTECTED = 2,
	TRANSL_UNAVAIL = 3,
};

static enum permission test_protection(void *addr, uint8_t key)
{
	uint64_t mask;

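	/*
	 * TPROT sets the condition code (0-3) to reflect the access allowed
	 * to addr under the given access key; IPM copies the condition code
	 * into the register so that the shift below leaves it in the low
	 * bits, matching the enum permission values.
	 */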
	asm volatile (
		       "tprot	%[addr], 0(%[key])\n"
		"	ipm	%[mask]\n"
		: [mask] "=r" (mask)
		: [addr] "Q" (*(char *)addr),
		  [key] "a" (key)
		: "cc"
	);

	return (enum permission)(mask >> 28);
}

enum stage {
	STAGE_END,
	STAGE_INIT_SIMPLE,
	TEST_SIMPLE,
	STAGE_INIT_FETCH_PROT_OVERRIDE,
	TEST_FETCH_PROT_OVERRIDE,
	TEST_STORAGE_PROT_OVERRIDE,
};
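
/*
 * Guest and host step through the stages above in lockstep: the guest
 * announces each stage via GUEST_SYNC and the host's HOST_SYNC checks
 * that the expected stage was reached before it sets up the next one.
 */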

struct test {
	enum stage stage;
	void *addr;
	uint8_t key;
	enum permission expected;
} tests[] = {
	/*
	 * We perform each test in the array by executing TEST PROTECTION on
	 * the specified addr with the specified key and checking if the returned
	 * permissions match the expected value.
	 * Both guest and host cooperate to set up the required test conditions.
	 * A central condition is that the page targeted by addr has to be DAT
	 * protected in the host mappings, in order for KVM to emulate the
	 * TEST PROTECTION instruction.
	 * Since the page tables are shared, the host uses mprotect to achieve
	 * this.
	 *
	 * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
	 * by SIE, not KVM, but there is no harm in testing them as well.
	 * See Enhanced Suppression-on-Protection Facilities in the
	 * Interpretive-Execution Mode.
	 */
	/*
	 * guest: set storage key of page_store_prot to 1,
	 *        set storage key of page_fetch_prot to 9 and enable
	 *        fetch protection for it
	 * STAGE_INIT_SIMPLE
	 * host: write protect both via mprotect
	 */
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
	/* mismatched keys, but no fetch protection -> RO */
	{ TEST_SIMPLE, page_store_prot, 0x20, READ },
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
	/* mismatched keys, fetch protection -> inaccessible */
	{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
	/* page 0 not mapped yet -> translation not available */
	{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
	/*
	 * host: try to map page 0
	 * guest: set storage key of page 0 to 9 and enable fetch protection
	 * STAGE_INIT_FETCH_PROT_OVERRIDE
	 * host: write protect page 0
	 *       enable fetch protection override
	 */
	/* mismatched keys, fetch protection, but override applies -> RO */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
	/* mismatched keys, fetch protection, override applies to bytes 0-2047 only -> inaccessible */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
	/*
	 * host: enable storage protection override
	 */
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
	/* mismatched keys, no fetch protection, override doesn't apply -> RO */
	{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
	/* end marker */
	{ STAGE_END, 0, 0, 0 },
};

static enum stage perform_next_stage(int *i, bool mapped_0)
{
	enum stage stage = tests[*i].stage;
	enum permission result;
	bool skip;

	for (; tests[*i].stage == stage; (*i)++) {
		/*
		 * Some fetch protection override tests require that page 0
		 * be mapped. However, when the host tries to map that page via
		 * vm_vaddr_alloc, it may happen that some other page gets mapped
		 * instead.
		 * In order to skip these tests, we detect this inside the guest.
		 */
		skip = tests[*i].addr < (void *)4096 &&
		       tests[*i].expected != TRANSL_UNAVAIL &&
		       !mapped_0;
		if (!skip) {
			result = test_protection(tests[*i].addr, tests[*i].key);
			GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
		}
	}
	return stage;
}

static void guest_code(void)
{
	bool mapped_0;
	int i = 0;

	GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
	GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
	GUEST_SYNC(STAGE_INIT_SIMPLE);
	GUEST_SYNC(perform_next_stage(&i, false));

	/* Fetch-protection override */
	mapped_0 = !set_storage_key((void *)0, 0x98);
	GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
	GUEST_SYNC(perform_next_stage(&i, mapped_0));

	/* Storage-protection override */
	GUEST_SYNC(perform_next_stage(&i, mapped_0));
}

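/*
 * Run the vCPU until its next ucall and check that the guest reached the
 * expected synchronization stage; a guest assertion is reported as a test
 * failure.
 */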
#define HOST_SYNC(vmp, stage)							\
({										\
	struct kvm_vm *__vm = (vmp);						\
	struct ucall uc;							\
	int __stage = (stage);							\
										\
	vcpu_run(__vm, VCPU_ID);						\
	get_ucall(__vm, VCPU_ID, &uc);						\
	if (uc.cmd == UCALL_ABORT) {						\
		TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1],		\
			  (const char *)uc.args[0], uc.args[2], uc.args[3]);	\
	}									\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);						\
	ASSERT_EQ(uc.args[1], __stage);						\
})

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	vm_vaddr_t guest_0_page;

	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	HOST_SYNC(vm, STAGE_INIT_SIMPLE);
	mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
	HOST_SYNC(vm, TEST_SIMPLE);

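	/*
	 * Try to place a guest page at address 0 for the fetch-protection
	 * override tests.  vm_vaddr_alloc may return a different address,
	 * in which case the affected tests are skipped inside the guest.
	 */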
	guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
	if (guest_0_page != 0)
		print_skip("Did not allocate page at 0 for fetch protection override tests");
	HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
	if (guest_0_page == 0)
		mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
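	/*
	 * Enable fetch-protection override in guest CR0.  Flagging the
	 * control registers as dirty via KVM_SYNC_CRS makes KVM load the
	 * updated value on the next vcpu_run.
	 */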
	run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vm, TEST_FETCH_PROT_OVERRIDE);

	run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vm, TEST_STORAGE_PROT_OVERRIDE);
}