| author | Brijesh Singh <brijesh.singh@amd.com> | 2022-04-25 23:13:01 +0000 |
|---|---|---|
| committer | Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> | 2022-07-13 17:27:25 -0500 |
| commit | fab81bc5030dcb8e50ad4d8e9cd2cb1a4983e55a (patch) | |
| tree | 163d8b072271decd952b0980d819898ce4d46a0e /arch/x86/kernel | |
| parent | 3a6bdb412c2732a2c151089701d9df59e879a1eb (diff) | |
x86/sev: Add RMP entry lookup helpers
The snp_lookup_rmpentry() helper can be used by the host to read the RMP
entry for a given page. The RMP entry format is documented in the AMD PPR; see
https://bugzilla.kernel.org/attachment.cgi?id=296015.
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
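
A minimal usage sketch of the new helper follows. The wrapper below is hypothetical and not part of this patch; it is a fragment for host kernel code that already has the helper's declaration in scope, and it relies only on the return convention documented in the diff further down (1 = assigned, 0 = not assigned, negative errno if no entry).

```c
/*
 * Illustrative only: a hypothetical host-side check built on top of
 * snp_lookup_rmpentry(). Neither this function nor its name exist in
 * the patch; it just shows how the return values are meant to be used.
 */
static bool example_pfn_assigned_to_guest(u64 pfn)
{
	int level;
	int rc = snp_lookup_rmpentry(pfn, &level);

	if (rc < 0)
		return false;	/* no RMP entry, bad PFN, or SNP not enabled */

	return rc == 1;		/* 1: assigned to a guest, 0: hypervisor-owned */
}
```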
Diffstat (limited to 'arch/x86/kernel')
| -rw-r--r-- | arch/x86/kernel/sev.c | 43 |
1 file changed, 43 insertions, 0 deletions
```diff
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index a1cfa6e19922..97e9bd740860 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -64,6 +64,8 @@
  * bookkeeping, the range need to be added during the RMP entry lookup.
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
+#define RMPENTRY_SHIFT			8
+#define rmptable_page_offset(x)	(RMPTABLE_CPU_BOOKKEEPING_SZ + (((unsigned long)x) >> RMPENTRY_SHIFT))
 
 /* For early boot hypervisor communication in SEV-ES enabled guests */
 static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
@@ -2429,3 +2431,44 @@ nosnp:
  * and it is available after subsys_initcall().
  */
 fs_initcall(snp_rmptable_init);
+
+static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
+{
+	unsigned long vaddr, paddr = pfn << PAGE_SHIFT;
+	struct rmpentry *entry, *large_entry;
+
+	if (!pfn_valid(pfn))
+		return ERR_PTR(-EINVAL);
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return ERR_PTR(-ENXIO);
+
+	vaddr = rmptable_start + rmptable_page_offset(paddr);
+	if (unlikely(vaddr > rmptable_end))
+		return ERR_PTR(-ENXIO);
+
+	entry = (struct rmpentry *)vaddr;
+
+	/* Read a large RMP entry to get the correct page level used in RMP entry. */
+	vaddr = rmptable_start + rmptable_page_offset(paddr & PMD_MASK);
+	large_entry = (struct rmpentry *)vaddr;
+	*level = RMP_TO_X86_PG_LEVEL(rmpentry_pagesize(large_entry));
+
+	return entry;
+}
+
+/*
+ * Return 1 if the RMP entry is assigned, 0 if it exists but is not assigned,
+ * and -errno if there is no corresponding RMP entry.
+ */
+int snp_lookup_rmpentry(u64 pfn, int *level)
+{
+	struct rmpentry *e;
+
+	e = __snp_lookup_rmpentry(pfn, level);
+	if (IS_ERR(e))
+		return PTR_ERR(e);
+
+	return !!rmpentry_assigned(e);
+}
+EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
```
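
For reference, the new rmptable_page_offset() macro is an index calculation: one 16-byte RMP entry per 4 KiB page frame, placed after the CPU bookkeeping area at the start of the table. A standalone sketch of the same arithmetic, assuming the 16-byte entry size documented in the AMD PPR (the function name is made up for illustration):

```c
#include <stdint.h>

/*
 * Illustrative only: recomputes the byte offset that rmptable_page_offset()
 * produces for a 4 KiB-aligned physical address (in the patch, paddr is
 * always pfn << PAGE_SHIFT, so the alignment assumption holds).
 */
static uint64_t example_rmp_entry_offset(uint64_t paddr)
{
	uint64_t idx = paddr >> 12;	/* one RMP entry per 4 KiB page frame */
	uint64_t off = idx * 16;	/* 16-byte entries: equals paddr >> 8  */

	return 0x4000 + off;		/* skip RMPTABLE_CPU_BOOKKEEPING_SZ    */
}
```

For a page-aligned paddr, (paddr >> 12) * 16 is the same value as paddr >> 8, which is why the macro can use a single shift by RMPENTRY_SHIFT.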
