cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amx_test.c (10284B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * amx tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for amx #NM exception and save/restore.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#ifndef __x86_64__
# error This test is 64-bit only
#endif

#define VCPU_ID				0
#define X86_FEATURE_XSAVE		(1 << 26)
#define X86_FEATURE_OSXSAVE		(1 << 27)

#define NUM_TILES			8
#define TILE_SIZE			1024
#define XSAVE_SIZE			((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Constants associated with the tile configuration: */
#define MAX_TILES			16
#define RESERVED_BYTES			14

#define XFEATURE_XTILECFG		17
#define XFEATURE_XTILEDATA		18
#define XFEATURE_MASK_XTILECFG		(1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA		(1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

#define TILE_CPUID			0x1d
#define XSTATE_CPUID			0xd
#define TILE_PALETTE_CPUID_SUBLEAVE	0x1
#define XSTATE_USER_STATE_SUBLEAVE	0x0

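/*
 * The 512-byte legacy region of an XSAVE area is followed by the 64-byte
 * XSAVE header; its first 8 bytes are XSTATE_BV (XCOMP_BV sits at
 * offset 520).
 */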
#define XSAVE_HDR_OFFSET		512

struct xsave_data {
	u8 area[XSAVE_SIZE];
} __aligned(64);

struct tile_config {
	u8  palette_id;
	u8  start_row;
	u8  reserved[RESERVED_BYTES];
	u16 colsb[MAX_TILES];
	u8  rows[MAX_TILES];
};

struct tile_data {
	u8 data[NUM_TILES * TILE_SIZE];
};

struct xtile_info {
	u16 bytes_per_tile;
	u16 bytes_per_row;
	u16 max_names;
	u16 max_rows;
	u32 xsave_offset;
	u32 xsave_size;
};

static struct xtile_info xtile;

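/* XGETBV/XSETBV with index (ECX) 0 read and write XCR0. */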
static inline u64 __xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile("xgetbv;"
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void __xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

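/*
 * The AMX instructions are emitted as raw VEX-encoded bytes so the test
 * builds with assemblers that do not know the AMX mnemonics:
 *   c4 e2 78 49 00     ldtilecfg (%rax)
 *   c4 e2 7b 4b 04 10  tileloadd (%rax,%rdx,1), %tmm0
 *   c4 e2 78 49 c0     tilerelease
 */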
static inline void __ldtilecfg(void *cfg)
{
	asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
		     : : "a"(cfg));
}

static inline void __tileloadd(void *tile)
{
	asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
		     : : "a"(tile), "d"(0));
}

static inline void __tilerelease(void)
{
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}

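/*
 * XSAVEC takes the requested-feature bitmap in EDX:EAX and writes the
 * destination in compacted format; components in their init state get
 * their XSTATE_BV bit cleared, which guest_code relies on below.
 */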
static inline void __xsavec(struct xsave_data *data, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsavec (%%rdi)"
		     : : "D" (data), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}

static inline void check_cpuid_xsave(void)
{
	uint32_t eax, ebx, ecx, edx;

	eax = 1;
	ecx = 0;
	cpuid(&eax, &ebx, &ecx, &edx);
	if (!(ecx & X86_FEATURE_XSAVE))
		GUEST_ASSERT(!"cpuid: no CPU xsave support!");
	if (!(ecx & X86_FEATURE_OSXSAVE))
		GUEST_ASSERT(!"cpuid: no OS xsave support!");
}

static bool check_xsave_supports_xtile(void)
{
	return __xgetbv(0) & XFEATURE_MASK_XTILE;
}

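/*
 * CPUID.0x1D (tile information), sub-leaf 1 describes palette 1:
 * EAX[31:16] = bytes per tile, EBX[15:0] = bytes per row,
 * EBX[31:16] = number of tile registers, ECX[15:0] = max rows.
 */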
static bool enum_xtile_config(void)
{
	u32 eax, ebx, ecx, edx;

	eax = TILE_CPUID;
	ecx = TILE_PALETTE_CPUID_SUBLEAVE;

	cpuid(&eax, &ebx, &ecx, &edx);
	if (!eax || !ebx || !ecx)
		return false;

	xtile.max_names = ebx >> 16;
	if (xtile.max_names < NUM_TILES)
		return false;

	xtile.bytes_per_tile = eax >> 16;
	if (xtile.bytes_per_tile < TILE_SIZE)
		return false;

	xtile.bytes_per_row = ebx;
	xtile.max_rows = ecx;

	return true;
}

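/*
 * CPUID.0xD, sub-leaf 18 (XTILEDATA) reports the size (EAX) and the
 * standard-format offset (EBX) of the tile data state component.
 */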
static bool enum_xsave_tile(void)
{
	u32 eax, ebx, ecx, edx;

	eax = XSTATE_CPUID;
	ecx = XFEATURE_XTILEDATA;

	cpuid(&eax, &ebx, &ecx, &edx);
	if (!eax || !ebx)
		return false;

	xtile.xsave_offset = ebx;
	xtile.xsave_size = eax;

	return true;
}

static bool check_xsave_size(void)
{
	u32 eax, ebx, ecx, edx;
	bool valid = false;

	eax = XSTATE_CPUID;
	ecx = XSTATE_USER_STATE_SUBLEAVE;

	cpuid(&eax, &ebx, &ecx, &edx);
	if (ebx && ebx <= XSAVE_SIZE)
		valid = true;

	return valid;
}

static bool check_xtile_info(void)
{
	bool ret = false;

	if (!check_xsave_size())
		return ret;

	if (!enum_xsave_tile())
		return ret;

	if (!enum_xtile_config())
		return ret;

	if (sizeof(struct tile_data) >= xtile.xsave_size)
		ret = true;

	return ret;
}

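/*
 * Palette 1 is the only architecturally defined palette at the time of
 * writing; on the hardware this test asserts for, it provides 8 tile
 * registers of 16 rows x 64 bytes.
 */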
static void set_tilecfg(struct tile_config *cfg)
{
	int i;

	/* Only palette id 1 */
	cfg->palette_id = 1;
	for (i = 0; i < xtile.max_names; i++) {
		cfg->colsb[i] = xtile.bytes_per_row;
		cfg->rows[i] = xtile.max_rows;
	}
}

static void set_xstatebv(void *data, uint64_t bv)
{
	*(uint64_t *)(data + XSAVE_HDR_OFFSET) = bv;
}

static u64 get_xstatebv(void *data)
{
	return *(u64 *)(data + XSAVE_HDR_OFFSET);
}

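/*
 * XSETBV faults unless CR4.OSXSAVE is set, so CR4 is updated before
 * XCR0.  Setting the XTILE bits in XCR0 also requires that the host
 * granted the dynamic-state permission requested in main().
 */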
static void init_regs(void)
{
	uint64_t cr4, xcr0;

	/* turn on CR4.OSXSAVE */
	cr4 = get_cr4();
	cr4 |= X86_CR4_OSXSAVE;
	set_cr4(cr4);

	xcr0 = __xgetbv(0);
	xcr0 |= XFEATURE_MASK_XTILE;
	__xsetbv(0x0, xcr0);
}

static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
						    struct tile_data *tiledata,
						    struct xsave_data *xsave_data)
{
	init_regs();
	check_cpuid_xsave();
	GUEST_ASSERT(check_xsave_supports_xtile());
	GUEST_ASSERT(check_xtile_info());

	/* check xtile configs */
	GUEST_ASSERT(xtile.xsave_offset == 2816);
	GUEST_ASSERT(xtile.xsave_size == 8192);
	GUEST_ASSERT(xtile.max_names == 8);
	GUEST_ASSERT(xtile.bytes_per_tile == 1024);
	GUEST_ASSERT(xtile.bytes_per_row == 64);
	GUEST_ASSERT(xtile.max_rows == 16);
	GUEST_SYNC(1);

	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(2);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	GUEST_SYNC(3);
	/* Check save/restore when trapping to userspace */
	__tileloadd(tiledata);
	GUEST_SYNC(4);
	__tilerelease();
	GUEST_SYNC(5);
	/*
	 * Tile data is back in its init state after tilerelease, so
	 * xsavec() must clear bit 18 in XSTATE_BV even though it is set
	 * by hand here.
	 */
	set_xstatebv(xsave_data, XFEATURE_MASK_XTILEDATA);
	__xsavec(xsave_data, XFEATURE_MASK_XTILEDATA);
	GUEST_ASSERT((get_xstatebv(xsave_data) & XFEATURE_MASK_XTILEDATA) == 0);

	/* xfd=0x40000, disable amx tiledata */
	wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILEDATA);
	GUEST_SYNC(6);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILEDATA);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	/* Trigger #NM exception */
	__tileloadd(tiledata);
	GUEST_SYNC(10);

	GUEST_DONE();
}

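/*
 * With the XTILEDATA bit set in IA32_XFD, the tileloadd above raises
 * #NM and IA32_XFD_ERR identifies the disabled feature.  XFD_ERR is
 * read twice with a GUEST_SYNC in between: main() saves and restores
 * the vCPU into a fresh VM at every sync, so this also verifies that
 * XFD_ERR survives a save/restore cycle.
 */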
void guest_nm_handler(struct ex_regs *regs)
{
	/* Check if #NM is triggered by XFEATURE_MASK_XTILEDATA */
	GUEST_SYNC(7);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA);
	GUEST_SYNC(8);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA);
	/* Clear xfd_err */
	wrmsr(MSR_IA32_XFD_ERR, 0);
	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(9);
}

int main(int argc, char *argv[])
{
	struct kvm_cpuid_entry2 *entry;
	struct kvm_regs regs1, regs2;
	bool amx_supported = false;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int xsave_restore_size = 0;
	vm_vaddr_t amx_cfg, tiledata, xsavedata;
	struct ucall uc;
	u32 amx_offset;
	int stage, ret;

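	/*
	 * Request permission to use the dynamic XTILEDATA state (via
	 * arch_prctl() under the hood); without this the host will not
	 * expose AMX to the guest.
	 */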
	vm_xsave_req_perm(XSTATE_XTILE_DATA_BIT);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	entry = kvm_get_supported_cpuid_entry(1);
	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
		print_skip("XSAVE feature not supported");
		exit(KSFT_SKIP);
	}

	if (kvm_get_cpuid_max_basic() >= 0xd) {
		entry = kvm_get_supported_cpuid_index(0xd, 0);
		amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE);
		if (!amx_supported) {
			print_skip("AMX is not supported by the vCPU (eax=0x%x)",
				   entry ? entry->eax : 0);
			exit(KSFT_SKIP);
		}
		/* Max xsave area size for all vCPU-supported features (CPUID.0xD.0:ECX) */
		xsave_restore_size = entry->ecx;
	}

	run = vcpu_state(vm, VCPU_ID);
	vcpu_regs_get(vm, VCPU_ID, &regs1);

	/* Register #NM handler */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);

	/* amx cfg for guest_code */
	amx_cfg = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());

	/* amx tiledata for guest_code */
	tiledata = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());

	/* xsave data for guest_code */
	xsavedata = vm_vaddr_alloc_pages(vm, 3);
	memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize());
	vcpu_args_set(vm, VCPU_ID, 3, amx_cfg, tiledata, xsavedata);

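	/*
	 * Run the guest to each GUEST_SYNC/GUEST_DONE.  After every stage
	 * the loop below saves the vCPU state, tears the VM down, recreates
	 * it and loads the state back, so each sync point also exercises
	 * AMX state migration.
	 */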
	for (stage = 1; ; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			switch (uc.args[1]) {
			case 1:
			case 2:
			case 3:
			case 5:
			case 6:
			case 7:
			case 8:
				fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
				break;
			case 4:
			case 10:
				fprintf(stderr,
				"GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]);

				/*
				 * The 8K of tile data is the last XSAVE
				 * state component, so its offset is the
				 * total XSAVE area size minus the tile
				 * data size.
				 */
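				/*
				 * For example, a vCPU reporting an
				 * 11008-byte area gives amx_offset =
				 * 11008 - 8192 = 2816, matching the
				 * xsave_offset asserted in guest_code.
				 */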
				amx_offset = xsave_restore_size - NUM_TILES * TILE_SIZE;
				state = vcpu_save_state(vm, VCPU_ID);
				void *amx_start = (void *)state->xsave + amx_offset;
				void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
				/* Only check TMM0 register, 1 tile */
				ret = memcmp(amx_start, tiles_data, TILE_SIZE);
				TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
				kvm_x86_state_cleanup(state);
				break;
			case 9:
				fprintf(stderr,
				"GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]);
				break;
			}
			break;
		case UCALL_DONE:
			fprintf(stderr, "UCALL_DONE\n");
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}
done:
	kvm_vm_free(vm);
}