// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>

#include <asm/smp.h>
#include <asm/sev.h>
#include <asm/e820/types.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

/* Minimum firmware version required for the SEV-SNP support */
#define SNP_MIN_API_MAJOR	1
#define SNP_MIN_API_MINOR	51

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

static char *init_ex_path;
module_param(init_ex_path, charp, 0444);
MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");

static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");

MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */

static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 * to allocate the memory, which will return aligned memory for the specified
 * allocation order.
 */
#define SEV_ES_TMR_SIZE		(1024 * 1024)
static void *sev_es_tmr;

/* INIT_EX NV Storage:
 * The NV Storage is a 32KB area and must be 4KB page aligned. Use the page
 * allocator to allocate the memory, which will return aligned memory for the
 * specified allocation order.
 */
#define NV_LENGTH		(32 * 1024)
static void *sev_init_ex_buffer;

/*
 * SEV_DATA_RANGE_LIST:
 * Array containing range of pages that firmware transitions to HV-fixed
 * page state.
 */
struct sev_data_range_list *snp_range_list;

/* When SEV-SNP is enabled the TMR must be 2MB aligned and 2MB in size. */
#define SEV_SNP_ES_TMR_SIZE	(2 * 1024 * 1024)

static size_t sev_es_tmr_size = SEV_ES_TMR_SIZE;

static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
static int sev_do_cmd(int cmd, void *data, int *psp_ret);

static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
				 sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}

static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
	case SEV_CMD_INIT_EX:			return sizeof(struct sev_data_init_ex);
	case SEV_CMD_SNP_SHUTDOWN_EX:		return sizeof(struct sev_data_snp_shutdown_ex);
	case SEV_CMD_SNP_INIT_EX:		return sizeof(struct sev_data_snp_init_ex);
	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
	case SEV_CMD_ATTESTATION_REPORT:	return sizeof(struct sev_data_attestation_report);
	case SEV_CMD_SEND_CANCEL:		return sizeof(struct sev_data_send_cancel);
	case SEV_CMD_SNP_GCTX_CREATE:		return sizeof(struct sev_data_snp_gctx_create);
	case SEV_CMD_SNP_LAUNCH_START:		return sizeof(struct sev_data_snp_launch_start);
	case SEV_CMD_SNP_LAUNCH_UPDATE:		return sizeof(struct sev_data_snp_launch_update);
	case SEV_CMD_SNP_ACTIVATE:		return sizeof(struct sev_data_snp_activate);
	case SEV_CMD_SNP_DECOMMISSION:		return sizeof(struct sev_data_snp_decommission);
	case SEV_CMD_SNP_PAGE_RECLAIM:		return sizeof(struct sev_data_snp_page_reclaim);
	case SEV_CMD_SNP_GUEST_STATUS:		return sizeof(struct sev_data_snp_guest_status);
	case SEV_CMD_SNP_LAUNCH_FINISH:		return sizeof(struct sev_data_snp_launch_finish);
	case SEV_CMD_SNP_DBG_DECRYPT:		return sizeof(struct sev_data_snp_dbg);
	case SEV_CMD_SNP_DBG_ENCRYPT:		return sizeof(struct sev_data_snp_dbg);
	case SEV_CMD_SNP_PAGE_UNSMASH:		return sizeof(struct sev_data_snp_page_unsmash);
	case SEV_CMD_SNP_PLATFORM_STATUS:	return sizeof(struct sev_data_snp_platform_status_buf);
	case SEV_CMD_SNP_GUEST_REQUEST:		return sizeof(struct sev_data_snp_guest_request);
	case SEV_CMD_SNP_CONFIG:		return sizeof(struct sev_user_data_snp_config);
	default:				return 0;
	}

	return 0;
}

static void snp_leak_pages(unsigned long pfn, unsigned int npages)
{
	WARN(1, "psc failed, pfn 0x%lx pages %d (leaking)\n", pfn, npages);
	while (npages--) {
		memory_failure(pfn, 0);
		dump_rmpentry(pfn);
		pfn++;
	}
}

static int snp_reclaim_pages(unsigned long pfn, unsigned int npages, bool locked)
{
	struct sev_data_snp_page_reclaim data;
	int ret, err, i, n = 0;

	for (i = 0; i < npages; i++) {
		memset(&data, 0, sizeof(data));
		data.paddr = pfn << PAGE_SHIFT;

		if (locked)
			ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
		else
			ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
		if (ret)
			goto cleanup;

		ret = rmp_make_shared(pfn, PG_LEVEL_4K);
		if (ret)
			goto cleanup;

		pfn++;
		n++;
	}

	return 0;

cleanup:
	/*
	 * If the firmware failed to reclaim the page then it is no longer
	 * safe to release it, so leak it.
	 */
	snp_leak_pages(pfn, npages - n);
	return ret;
}

static inline int rmp_make_firmware(unsigned long pfn, int level)
{
	return rmp_make_private(pfn, 0, level, 0, true);
}

static int snp_set_rmp_state(unsigned long paddr, unsigned int npages, bool to_fw, bool locked,
			     bool need_reclaim)
{
	unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT; /* The C-bit may be set in the paddr */
	int rc, n = 0, i;

	for (i = 0; i < npages; i++) {
		if (to_fw)
			rc = rmp_make_firmware(pfn, PG_LEVEL_4K);
		else
			rc = need_reclaim ? snp_reclaim_pages(pfn, 1, locked) :
					    rmp_make_shared(pfn, PG_LEVEL_4K);
		if (rc)
			goto cleanup;

		pfn++;
		n++;
	}

	return 0;

cleanup:
	/* Try unrolling the firmware state changes */
	if (to_fw) {
		/*
		 * Reclaim the pages which were already changed to the
		 * firmware state.
		 */
		snp_reclaim_pages(paddr >> PAGE_SHIFT, n, locked);

		return rc;
	}

	/*
	 * If we failed to change the page state to shared, then it is not
	 * safe to release the page back to the system, so leak it.
	 */
	snp_leak_pages(pfn, npages - n);

	return rc;
}

static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
{
	unsigned long npages = 1ul << order, paddr;
	struct sev_device *sev;
	struct page *page;

	if (!psp_master || !psp_master->sev_data)
		return NULL;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return NULL;

	/* If SEV-SNP is initialized then add the page to the RMP table. */
	sev = psp_master->sev_data;
	if (!sev->snp_inited)
		return page;

	paddr = __pa((unsigned long)page_address(page));
	if (snp_set_rmp_state(paddr, npages, true, locked, false))
		return NULL;

	return page;
}

void *snp_alloc_firmware_page(gfp_t gfp_mask)
{
	struct page *page;

	page = __snp_alloc_firmware_pages(gfp_mask, 0, false);

	return page ? page_address(page) : NULL;
}
EXPORT_SYMBOL_GPL(snp_alloc_firmware_page);

static void __snp_free_firmware_pages(struct page *page, int order, bool locked)
{
	unsigned long paddr, npages = 1ul << order;
	struct sev_device *sev;

	if (!page)
		return;

	paddr = __pa((unsigned long)page_address(page));
	sev = psp_master->sev_data;
	if (sev->snp_inited && snp_set_rmp_state(paddr, npages, false, locked, true))
		return;

	__free_pages(page, order);
}

void snp_free_firmware_page(void *addr)
{
	if (!addr)
		return;

	__snp_free_firmware_pages(virt_to_page(addr), 0, false);
}
EXPORT_SYMBOL(snp_free_firmware_page);

static void *sev_fw_alloc(unsigned long len)
{
	struct page *page;

	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), false);
	if (!page)
		return NULL;

	return page_address(page);
}

static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
	struct file *fp;
	struct path root;
	struct cred *cred;
	const struct cred *old_cred;

	task_lock(&init_task);
	get_fs_root(init_task.fs, &root);
	task_unlock(&init_task);

	cred = prepare_creds();
	if (!cred)
		return ERR_PTR(-ENOMEM);
	cred->fsuid = GLOBAL_ROOT_UID;
	old_cred = override_creds(cred);

	fp = file_open_root(&root, filename, flags, mode);
	path_put(&root);

	revert_creds(old_cred);

	return fp;
}

static int sev_read_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	ssize_t nread;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return -EOPNOTSUPP;

	fp = open_file_as_root(init_ex_path, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		int ret = PTR_ERR(fp);

		dev_err(sev->dev,
			"SEV: could not open %s for read, error %d\n",
			init_ex_path, ret);
		return ret;
	}

	nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
	if (nread != NV_LENGTH) {
		dev_err(sev->dev,
			"SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
			NV_LENGTH, nread);
		return -EIO;
	}

	dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
	filp_close(fp, NULL);

	return 0;
}

static void sev_write_init_ex_file(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct file *fp;
	loff_t offset = 0;
	ssize_t nwrite;

	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return;

	fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
	if (IS_ERR(fp)) {
		dev_err(sev->dev,
			"SEV: could not open file for write, error %ld\n",
			PTR_ERR(fp));
		return;
	}

	nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
	vfs_fsync(fp, 0);
	filp_close(fp, NULL);

	if (nwrite != NV_LENGTH) {
		dev_err(sev->dev,
			"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
			NV_LENGTH, nwrite);
		return;
	}

	dev_dbg(sev->dev, "SEV: write successful to NV file\n");
}

static void sev_write_init_ex_file_if_required(int cmd_id)
{
	lockdep_assert_held(&sev_cmd_mutex);

	if (!sev_init_ex_buffer)
		return;

	/*
	 * Only a few platform commands modify the SPI/NV area, but none of
	 * the non-platform commands do. Only FACTORY_RESET, INIT_EX,
	 * PDH_GEN, PEK_CERT_IMPORT, and PEK_GEN do.
	 */
	switch (cmd_id) {
	case SEV_CMD_FACTORY_RESET:
	case SEV_CMD_INIT_EX:
	case SEV_CMD_PDH_GEN:
	case SEV_CMD_PEK_CERT_IMPORT:
	case SEV_CMD_PEK_GEN:
		break;
	default:
		return;
	}

	sev_write_init_ex_file();
}

static int alloc_snp_host_map(struct sev_device *sev)
{
	struct page *page;
	int i;

	for (i = 0; i < MAX_SNP_HOST_MAP_BUFS; i++) {
		struct snp_host_map *map = &sev->snp_host_map[i];

		memset(map, 0, sizeof(*map));

		page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(SEV_FW_BLOB_MAX_SIZE));
		if (!page)
			return -ENOMEM;

		map->host = page_address(page);
	}

	return 0;
}

static void free_snp_host_map(struct sev_device *sev)
{
	int i;

	for (i = 0; i < MAX_SNP_HOST_MAP_BUFS; i++) {
		struct snp_host_map *map = &sev->snp_host_map[i];

		if (map->host) {
			__free_pages(virt_to_page(map->host), get_order(SEV_FW_BLOB_MAX_SIZE));
			memset(map, 0, sizeof(*map));
		}
	}
}

static int map_firmware_writeable(u64 *paddr, u32 len, bool guest, struct snp_host_map *map)
{
	unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;

	map->active = false;

	if (!paddr || !len)
		return 0;

	map->paddr = *paddr;
	map->len = len;

	/* If paddr points to guest memory then change the page state to firmware. */
	if (guest) {
		if (snp_set_rmp_state(*paddr, npages, true, true, false))
			return -EFAULT;

		goto done;
	}

	if (!map->host)
		return -ENOMEM;

	/* Check if the pre-allocated buffer can be used to fulfill the request. */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* Transition the pre-allocated buffer to the firmware state. */
	if (snp_set_rmp_state(__pa(map->host), npages, true, true, false))
		return -EFAULT;

	/* Set the paddr to use the pre-allocated firmware buffer. */
	*paddr = __psp_pa(map->host);

done:
	map->active = true;
	return 0;
}

static int unmap_firmware_writeable(u64 *paddr, u32 len, bool guest, struct snp_host_map *map)
{
	unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;

	if (!map->active)
		return 0;

	/* If paddr points to guest memory then restore the page state to hypervisor. */
	if (guest) {
		if (snp_set_rmp_state(*paddr, npages, false, true, true))
			return -EFAULT;

		goto done;
	}

	/*
	 * Transition the pre-allocated buffer to the hypervisor state before
	 * the access.
	 *
	 * This is because while changing the page state to firmware, the
	 * kernel unmaps the pages from the direct map, and to restore the
	 * direct map we must transition the pages to the shared state.
	 */
	if (snp_set_rmp_state(__pa(map->host), npages, false, true, true))
		return -EFAULT;

	/* Copy the response data from the firmware buffer to the caller's buffer. */
	memcpy(__va(__sme_clr(map->paddr)), map->host, min_t(size_t, len, map->len));
	*paddr = map->paddr;

done:
	map->active = false;
	return 0;
}

static bool sev_legacy_cmd_buf_writable(int cmd)
{
	switch (cmd) {
	case SEV_CMD_PLATFORM_STATUS:
	case SEV_CMD_GUEST_STATUS:
	case SEV_CMD_LAUNCH_START:
	case SEV_CMD_RECEIVE_START:
	case SEV_CMD_LAUNCH_MEASURE:
	case SEV_CMD_SEND_START:
	case SEV_CMD_SEND_UPDATE_DATA:
	case SEV_CMD_SEND_UPDATE_VMSA:
	case SEV_CMD_PEK_CSR:
	case SEV_CMD_PDH_CERT_EXPORT:
	case SEV_CMD_GET_ID:
	case SEV_CMD_ATTESTATION_REPORT:
		return true;
	default:
		return false;
	}
}

#define prep_buffer(name, addr, len, guest, map) \
	func(&((typeof(name *))cmd_buf)->addr, ((typeof(name *))cmd_buf)->len, guest, map)

static int __snp_cmd_buf_copy(int cmd, void *cmd_buf, bool to_fw, int fw_err)
{
	int (*func)(u64 *paddr, u32 len, bool guest, struct snp_host_map *map);
	struct sev_device *sev = psp_master->sev_data;
	bool from_fw = !to_fw;

	/*
	 * After the command is completed, change the command buffer memory to
	 * the hypervisor state.
	 *
	 * The immutable bit is automatically cleared by the firmware, so
	 * there is no need to reclaim the page.
	 */
	if (from_fw && sev_legacy_cmd_buf_writable(cmd)) {
		if (snp_set_rmp_state(__pa(cmd_buf), 1, false, true, false))
			return -EFAULT;

		/* No need to go further if the firmware failed to execute the command. */
		if (fw_err)
			return 0;
	}

	if (to_fw)
		func = map_firmware_writeable;
	else
		func = unmap_firmware_writeable;

	/*
	 * A command buffer may contain a system physical address. If the
	 * address points to host memory then use an intermediate firmware
	 * page, otherwise change the page state in the RMP table.
	 */
	switch (cmd) {
	case SEV_CMD_PDH_CERT_EXPORT:
		if (prep_buffer(struct sev_data_pdh_cert_export, pdh_cert_address,
				pdh_cert_len, false, &sev->snp_host_map[0]))
			goto err;
		if (prep_buffer(struct sev_data_pdh_cert_export, cert_chain_address,
				cert_chain_len, false, &sev->snp_host_map[1]))
			goto err;
		break;
	case SEV_CMD_GET_ID:
		if (prep_buffer(struct sev_data_get_id, address, len,
				false, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_PEK_CSR:
		if (prep_buffer(struct sev_data_pek_csr, address, len,
				false, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_LAUNCH_UPDATE_DATA:
		if (prep_buffer(struct sev_data_launch_update_data, address, len,
				true, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_LAUNCH_UPDATE_VMSA:
		if (prep_buffer(struct sev_data_launch_update_vmsa, address, len,
				true, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_LAUNCH_MEASURE:
		if (prep_buffer(struct sev_data_launch_measure, address, len,
				false, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_LAUNCH_UPDATE_SECRET:
		if (prep_buffer(struct sev_data_launch_secret, guest_address, guest_len,
				true, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_DBG_DECRYPT:
		if (prep_buffer(struct sev_data_dbg, dst_addr, len, false,
				&sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_DBG_ENCRYPT:
		if (prep_buffer(struct sev_data_dbg, dst_addr, len, true,
				&sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_ATTESTATION_REPORT:
		if (prep_buffer(struct sev_data_attestation_report, address, len,
				false, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_SEND_START:
		if (prep_buffer(struct sev_data_send_start, session_address,
				session_len, false, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_SEND_UPDATE_DATA:
		if (prep_buffer(struct sev_data_send_update_data, hdr_address, hdr_len,
				false, &sev->snp_host_map[0]))
			goto err;
		if (prep_buffer(struct sev_data_send_update_data, trans_address,
				trans_len, false, &sev->snp_host_map[1]))
			goto err;
		break;
	case SEV_CMD_SEND_UPDATE_VMSA:
		if (prep_buffer(struct sev_data_send_update_vmsa, hdr_address, hdr_len,
				false, &sev->snp_host_map[0]))
			goto err;
		if (prep_buffer(struct sev_data_send_update_vmsa, trans_address,
				trans_len, false, &sev->snp_host_map[1]))
			goto err;
		break;
	case SEV_CMD_RECEIVE_UPDATE_DATA:
		if (prep_buffer(struct sev_data_receive_update_data, guest_address,
				guest_len, true, &sev->snp_host_map[0]))
			goto err;
		break;
	case SEV_CMD_RECEIVE_UPDATE_VMSA:
		if (prep_buffer(struct sev_data_receive_update_vmsa, guest_address,
				guest_len, true, &sev->snp_host_map[0]))
			goto err;
		break;
	default:
		break;
	}

	/* The command buffer needs to be in the firmware state. */
	if (to_fw && sev_legacy_cmd_buf_writable(cmd)) {
		if (snp_set_rmp_state(__pa(cmd_buf), 1, true, true, false))
			return -EFAULT;
	}

	return 0;

err:
	return -EINVAL;
}

static inline bool need_firmware_copy(int cmd)
{
	struct sev_device *sev = psp_master->sev_data;

	/* After SNP is INIT'ed, the behavior of the legacy SEV commands is changed. */
	return ((cmd < SEV_CMD_SNP_INIT) && sev->snp_inited) ?
true : false; 754} 755 756static int snp_aware_copy_to_firmware(int cmd, void *data) 757{ 758 return __snp_cmd_buf_copy(cmd, data, true, 0); 759} 760 761static int snp_aware_copy_from_firmware(int cmd, void *data, int fw_err) 762{ 763 return __snp_cmd_buf_copy(cmd, data, false, fw_err); 764} 765 766static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) 767{ 768 struct psp_device *psp = psp_master; 769 struct sev_device *sev; 770 unsigned int phys_lsb, phys_msb; 771 unsigned int reg, ret = 0; 772 void *cmd_buf; 773 int buf_len; 774 775 if (!psp || !psp->sev_data) 776 return -ENODEV; 777 778 if (psp_dead) 779 return -EBUSY; 780 781 sev = psp->sev_data; 782 783 buf_len = sev_cmd_buffer_len(cmd); 784 if (WARN_ON_ONCE(!data != !buf_len)) 785 return -EINVAL; 786 787 /* 788 * Copy the incoming data to driver's scratch buffer as __pa() will not 789 * work for some memory, e.g. vmalloc'd addresses, and @data may not be 790 * physically contiguous. 791 */ 792 if (data) { 793 if (sev->cmd_buf_active > 2) 794 return -EBUSY; 795 796 cmd_buf = sev->cmd_buf_active ? sev->cmd_buf_backup : sev->cmd_buf; 797 798 memcpy(cmd_buf, data, buf_len); 799 sev->cmd_buf_active++; 800 801 /* 802 * The behavior of the SEV-legacy commands is altered when the 803 * SNP firmware is in the INIT state. 804 */ 805 if (need_firmware_copy(cmd) && snp_aware_copy_to_firmware(cmd, sev->cmd_buf)) 806 return -EFAULT; 807 } else { 808 cmd_buf = sev->cmd_buf; 809 } 810 811 /* Get the physical address of the command buffer */ 812 phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0; 813 phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0; 814 815 dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", 816 cmd, phys_msb, phys_lsb, psp_timeout); 817 818 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, 819 buf_len, false); 820 821 iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); 822 iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); 823 824 sev->int_rcvd = 0; 825 826 reg = cmd; 827 reg <<= SEV_CMDRESP_CMD_SHIFT; 828 reg |= SEV_CMDRESP_IOC; 829 iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); 830 831 /* wait for command completion */ 832 ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); 833 if (ret) { 834 if (psp_ret) 835 *psp_ret = 0; 836 837 dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); 838 psp_dead = true; 839 840 return ret; 841 } 842 843 psp_timeout = psp_cmd_timeout; 844 845 if (psp_ret) 846 *psp_ret = reg & PSP_CMDRESP_ERR_MASK; 847 848 if (reg & PSP_CMDRESP_ERR_MASK) { 849 dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n", 850 cmd, reg & PSP_CMDRESP_ERR_MASK); 851 ret = -EIO; 852 } else { 853 sev_write_init_ex_file_if_required(cmd); 854 } 855 856 /* 857 * Copy potential output from the PSP back to data. Do this even on 858 * failure in case the caller wants to glean something from the error. 859 */ 860 if (data) { 861 /* 862 * Restore the page state after the command completes. 
863 */ 864 if (need_firmware_copy(cmd) && 865 snp_aware_copy_from_firmware(cmd, cmd_buf, ret)) 866 return -EFAULT; 867 868 memcpy(data, cmd_buf, buf_len); 869 sev->cmd_buf_active--; 870 } 871 872 print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, 873 buf_len, false); 874 875 return ret; 876} 877 878static int sev_do_cmd(int cmd, void *data, int *psp_ret) 879{ 880 int rc; 881 882 mutex_lock(&sev_cmd_mutex); 883 rc = __sev_do_cmd_locked(cmd, data, psp_ret); 884 mutex_unlock(&sev_cmd_mutex); 885 886 return rc; 887} 888 889static int __sev_init_locked(int *error) 890{ 891 struct sev_data_init data; 892 893 memset(&data, 0, sizeof(data)); 894 if (sev_es_tmr) { 895 /* 896 * Do not include the encryption mask on the physical 897 * address of the TMR (firmware should clear it anyway). 898 */ 899 data.tmr_address = __pa(sev_es_tmr); 900 901 data.flags |= SEV_INIT_FLAGS_SEV_ES; 902 data.tmr_len = sev_es_tmr_size; 903 } 904 905 return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); 906} 907 908static int __sev_init_ex_locked(int *error) 909{ 910 struct sev_data_init_ex data; 911 int ret; 912 913 memset(&data, 0, sizeof(data)); 914 data.length = sizeof(data); 915 data.nv_address = __psp_pa(sev_init_ex_buffer); 916 data.nv_len = NV_LENGTH; 917 918 ret = sev_read_init_ex_file(); 919 if (ret) 920 return ret; 921 922 if (sev_es_tmr) { 923 /* 924 * Do not include the encryption mask on the physical 925 * address of the TMR (firmware should clear it anyway). 926 */ 927 data.tmr_address = __pa(sev_es_tmr); 928 929 data.flags |= SEV_INIT_FLAGS_SEV_ES; 930 data.tmr_len = sev_es_tmr_size; 931 } 932 933 return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error); 934} 935 936static int __sev_platform_init_locked(int *error) 937{ 938 struct psp_device *psp = psp_master; 939 struct sev_device *sev; 940 int rc, psp_ret = -1; 941 int (*init_function)(int *error); 942 943 if (!psp || !psp->sev_data) 944 return -ENODEV; 945 946 sev = psp->sev_data; 947 948 if (sev->state == SEV_STATE_INIT) 949 return 0; 950 951 init_function = sev_init_ex_buffer ? __sev_init_ex_locked : 952 __sev_init_locked; 953 rc = init_function(&psp_ret); 954 if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { 955 /* 956 * Initialization command returned an integrity check failure 957 * status code, meaning that firmware load and validation of SEV 958 * related persistent data has failed. Retrying the 959 * initialization function should succeed by replacing the state 960 * with a reset state. 961 */ 962 dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. 
Retrying once to reset PSP SEV state."); 963 rc = init_function(&psp_ret); 964 } 965 if (error) 966 *error = psp_ret; 967 968 if (rc) 969 return rc; 970 971 sev->state = SEV_STATE_INIT; 972 973 /* Prepare for first SEV guest launch after INIT */ 974 wbinvd_on_all_cpus(); 975 rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); 976 if (rc) 977 return rc; 978 979 dev_dbg(sev->dev, "SEV firmware initialized\n"); 980 981 dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, 982 sev->api_minor, sev->build); 983 984 return 0; 985} 986 987int sev_platform_init(int *error) 988{ 989 int rc; 990 991 mutex_lock(&sev_cmd_mutex); 992 rc = __sev_platform_init_locked(error); 993 mutex_unlock(&sev_cmd_mutex); 994 995 return rc; 996} 997EXPORT_SYMBOL_GPL(sev_platform_init); 998 999static int __sev_platform_shutdown_locked(int *error) 1000{ 1001 struct sev_device *sev = psp_master->sev_data; 1002 int ret; 1003 1004 if (sev->state == SEV_STATE_UNINIT) 1005 return 0; 1006 1007 ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); 1008 if (ret) 1009 return ret; 1010 1011 sev->state = SEV_STATE_UNINIT; 1012 dev_dbg(sev->dev, "SEV firmware shutdown\n"); 1013 1014 return ret; 1015} 1016 1017static int sev_platform_shutdown(int *error) 1018{ 1019 int rc; 1020 1021 mutex_lock(&sev_cmd_mutex); 1022 rc = __sev_platform_shutdown_locked(NULL); 1023 mutex_unlock(&sev_cmd_mutex); 1024 1025 return rc; 1026} 1027 1028static int sev_get_platform_state(int *state, int *error) 1029{ 1030 struct sev_user_data_status data; 1031 int rc; 1032 1033 rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error); 1034 if (rc) 1035 return rc; 1036 1037 *state = data.state; 1038 return rc; 1039} 1040 1041static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) 1042{ 1043 int state, rc; 1044 1045 if (!writable) 1046 return -EPERM; 1047 1048 /* 1049 * The SEV spec requires that FACTORY_RESET must be issued in 1050 * UNINIT state. Before we go further lets check if any guest is 1051 * active. 1052 * 1053 * If FW is in WORKING state then deny the request otherwise issue 1054 * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. 
1055 * 1056 */ 1057 rc = sev_get_platform_state(&state, &argp->error); 1058 if (rc) 1059 return rc; 1060 1061 if (state == SEV_STATE_WORKING) 1062 return -EBUSY; 1063 1064 if (state == SEV_STATE_INIT) { 1065 rc = __sev_platform_shutdown_locked(&argp->error); 1066 if (rc) 1067 return rc; 1068 } 1069 1070 return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); 1071} 1072 1073static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) 1074{ 1075 struct sev_user_data_status data; 1076 int ret; 1077 1078 ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); 1079 if (ret) 1080 return ret; 1081 1082 if (copy_to_user((void __user *)argp->data, &data, sizeof(data))) 1083 ret = -EFAULT; 1084 1085 return ret; 1086} 1087 1088static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) 1089{ 1090 struct sev_device *sev = psp_master->sev_data; 1091 int rc; 1092 1093 if (!writable) 1094 return -EPERM; 1095 1096 if (sev->state == SEV_STATE_UNINIT) { 1097 rc = __sev_platform_init_locked(&argp->error); 1098 if (rc) 1099 return rc; 1100 } 1101 1102 return __sev_do_cmd_locked(cmd, NULL, &argp->error); 1103} 1104 1105static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) 1106{ 1107 struct sev_device *sev = psp_master->sev_data; 1108 struct sev_user_data_pek_csr input; 1109 struct sev_data_pek_csr data; 1110 void __user *input_address; 1111 void *blob = NULL; 1112 int ret; 1113 1114 if (!writable) 1115 return -EPERM; 1116 1117 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1118 return -EFAULT; 1119 1120 memset(&data, 0, sizeof(data)); 1121 1122 /* userspace wants to query CSR length */ 1123 if (!input.address || !input.length) 1124 goto cmd; 1125 1126 /* allocate a physically contiguous buffer to store the CSR blob */ 1127 input_address = (void __user *)input.address; 1128 if (input.length > SEV_FW_BLOB_MAX_SIZE) 1129 return -EFAULT; 1130 1131 blob = kmalloc(input.length, GFP_KERNEL); 1132 if (!blob) 1133 return -ENOMEM; 1134 1135 data.address = __psp_pa(blob); 1136 data.len = input.length; 1137 1138cmd: 1139 if (sev->state == SEV_STATE_UNINIT) { 1140 ret = __sev_platform_init_locked(&argp->error); 1141 if (ret) 1142 goto e_free_blob; 1143 } 1144 1145 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error); 1146 1147 /* If we query the CSR length, FW responded with expected data. */ 1148 input.length = data.len; 1149 1150 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 1151 ret = -EFAULT; 1152 goto e_free_blob; 1153 } 1154 1155 if (blob) { 1156 if (copy_to_user(input_address, blob, input.length)) 1157 ret = -EFAULT; 1158 } 1159 1160e_free_blob: 1161 kfree(blob); 1162 return ret; 1163} 1164 1165void *psp_copy_user_blob(u64 uaddr, u32 len) 1166{ 1167 if (!uaddr || !len) 1168 return ERR_PTR(-EINVAL); 1169 1170 /* verify that blob length does not exceed our limit */ 1171 if (len > SEV_FW_BLOB_MAX_SIZE) 1172 return ERR_PTR(-EINVAL); 1173 1174 return memdup_user((void __user *)uaddr, len); 1175} 1176EXPORT_SYMBOL_GPL(psp_copy_user_blob); 1177 1178static int sev_get_api_version(void) 1179{ 1180 struct sev_device *sev = psp_master->sev_data; 1181 struct sev_user_data_status status; 1182 int error = 0, ret; 1183 1184 ret = sev_platform_status(&status, &error); 1185 if (ret) { 1186 dev_err(sev->dev, 1187 "SEV: failed to get status. 
Error: %#x\n", error); 1188 return 1; 1189 } 1190 1191 sev->api_major = status.api_major; 1192 sev->api_minor = status.api_minor; 1193 sev->build = status.build; 1194 sev->state = status.state; 1195 1196 return 0; 1197} 1198 1199static int sev_get_firmware(struct device *dev, 1200 const struct firmware **firmware) 1201{ 1202 char fw_name_specific[SEV_FW_NAME_SIZE]; 1203 char fw_name_subset[SEV_FW_NAME_SIZE]; 1204 1205 snprintf(fw_name_specific, sizeof(fw_name_specific), 1206 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", 1207 boot_cpu_data.x86, boot_cpu_data.x86_model); 1208 1209 snprintf(fw_name_subset, sizeof(fw_name_subset), 1210 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin", 1211 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4); 1212 1213 /* Check for SEV FW for a particular model. 1214 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h 1215 * 1216 * or 1217 * 1218 * Check for SEV FW common to a subset of models. 1219 * Ex. amd_sev_fam17h_model0xh.sbin for 1220 * Family 17h Model 00h -- Family 17h Model 0Fh 1221 * 1222 * or 1223 * 1224 * Fall-back to using generic name: sev.fw 1225 */ 1226 if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) || 1227 (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) || 1228 (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) 1229 return 0; 1230 1231 return -ENOENT; 1232} 1233 1234/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ 1235static int sev_update_firmware(struct device *dev) 1236{ 1237 struct sev_data_download_firmware *data; 1238 const struct firmware *firmware; 1239 int ret, error, order; 1240 struct page *p; 1241 u64 data_size; 1242 1243 if (sev_get_firmware(dev, &firmware) == -ENOENT) { 1244 dev_dbg(dev, "No SEV firmware file present\n"); 1245 return -1; 1246 } 1247 1248 /* 1249 * SEV FW expects the physical address given to it to be 32 1250 * byte aligned. Memory allocated has structure placed at the 1251 * beginning followed by the firmware being passed to the SEV 1252 * FW. Allocate enough memory for data structure + alignment 1253 * padding + SEV FW. 1254 */ 1255 data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); 1256 1257 order = get_order(firmware->size + data_size); 1258 p = alloc_pages(GFP_KERNEL, order); 1259 if (!p) { 1260 ret = -1; 1261 goto fw_err; 1262 } 1263 1264 /* 1265 * Copy firmware data to a kernel allocated contiguous 1266 * memory region. 
1267 */ 1268 data = page_address(p); 1269 memcpy(page_address(p) + data_size, firmware->data, firmware->size); 1270 1271 data->address = __psp_pa(page_address(p) + data_size); 1272 data->len = firmware->size; 1273 1274 ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); 1275 if (ret) 1276 dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); 1277 else 1278 dev_info(dev, "SEV firmware update successful\n"); 1279 1280 __free_pages(p, order); 1281 1282fw_err: 1283 release_firmware(firmware); 1284 1285 return ret; 1286} 1287 1288static void snp_set_hsave_pa(void *arg) 1289{ 1290 wrmsrl(MSR_VM_HSAVE_PA, 0); 1291} 1292 1293static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) 1294{ 1295 struct sev_data_range_list *range_list = arg; 1296 struct sev_data_range *range = &range_list->ranges[range_list->num_elements]; 1297 size_t size; 1298 1299 if ((range_list->num_elements * sizeof(struct sev_data_range) + 1300 sizeof(struct sev_data_range_list)) > PAGE_SIZE) 1301 return -E2BIG; 1302 1303 switch(rs->desc) { 1304 case E820_TYPE_RESERVED: 1305 case E820_TYPE_PMEM: 1306 case E820_TYPE_ACPI: 1307 range->base = rs->start & PAGE_MASK; 1308 size = (rs->end + 1) - rs->start; 1309 range->page_count = size >> PAGE_SHIFT; 1310 range_list->num_elements++; 1311 break; 1312 default: 1313 break; 1314 } 1315 1316 return 0; 1317} 1318 1319static int __sev_snp_init_locked(int *error) 1320{ 1321 struct psp_device *psp = psp_master; 1322 struct sev_data_snp_init_ex data; 1323 struct sev_device *sev; 1324 int rc = 0; 1325 1326 if (!psp || !psp->sev_data) 1327 return -ENODEV; 1328 1329 sev = psp->sev_data; 1330 1331 if (sev->snp_inited) 1332 return 0; 1333 1334 /* 1335 * The SNP_INIT requires the MSR_VM_HSAVE_PA must be set to 0h 1336 * across all cores. 1337 */ 1338 on_each_cpu(snp_set_hsave_pa, NULL, 1); 1339 1340 /* 1341 * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list of 1342 * system physical address ranges to convert into the HV-fixed page states 1343 * during the RMP initialization. For instance, the memory that UEFI 1344 * reserves should be included in the range list. This allows system 1345 * components that occasionally write to memory (e.g. logging to UEFI 1346 * reserved regions) to not fail due to RMP initialization and SNP enablement. 1347 */ 1348 if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { 1349 /* 1350 * Firmware checks that the pages containing the ranges enumerated 1351 * in the RANGES structure are either in the Default page state or in the 1352 * firmware page state. 1353 */ 1354 snp_range_list = sev_fw_alloc(PAGE_SIZE); 1355 if (!snp_range_list) { 1356 dev_err(sev->dev, 1357 "SEV: SNP_INIT_EX range list memory allocation failed\n"); 1358 return -ENOMEM; 1359 } 1360 1361 memset(snp_range_list, 0, PAGE_SIZE); 1362 1363 /* 1364 * Retrieve all reserved memory regions setup by UEFI from the e820 memory map 1365 * to be setup as HV-fixed pages. 1366 */ 1367 1368 rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0, snp_range_list, snp_filter_reserved_mem_regions); 1369 if (rc) { 1370 dev_err(sev->dev, 1371 "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc); 1372 return rc; 1373 } 1374 1375 memset(&data, 0, sizeof(data)); 1376 data.init_rmp = 1; 1377 data.list_paddr_en = 1; 1378 data.list_paddr = __pa(snp_range_list); 1379 1380 /* Issue the SNP_INIT_EX firmware command. 
*/ 1381 rc = __sev_do_cmd_locked(SEV_CMD_SNP_INIT_EX, &data, error); 1382 if (rc) 1383 return rc; 1384 } else { 1385 /* Issue the SNP_INIT firmware command. */ 1386 rc = __sev_do_cmd_locked(SEV_CMD_SNP_INIT, NULL, error); 1387 if (rc) 1388 return rc; 1389 } 1390 1391 /* Prepare for first SNP guest launch after INIT */ 1392 wbinvd_on_all_cpus(); 1393 rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); 1394 if (rc) 1395 return rc; 1396 1397 sev->snp_inited = true; 1398 dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); 1399 1400 sev_es_tmr_size = SEV_SNP_ES_TMR_SIZE; 1401 1402 return rc; 1403} 1404 1405int sev_snp_init(int *error) 1406{ 1407 int rc; 1408 1409 if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP)) 1410 return -ENODEV; 1411 1412 mutex_lock(&sev_cmd_mutex); 1413 rc = __sev_snp_init_locked(error); 1414 mutex_unlock(&sev_cmd_mutex); 1415 1416 return rc; 1417} 1418EXPORT_SYMBOL_GPL(sev_snp_init); 1419 1420static int __sev_snp_shutdown_locked(int *error) 1421{ 1422 struct sev_device *sev = psp_master->sev_data; 1423 struct sev_data_snp_shutdown_ex data; 1424 int ret; 1425 1426 if (!sev->snp_inited) 1427 return 0; 1428 1429 memset(&data, 0, sizeof(data)); 1430 data.length = sizeof(data); 1431 data.iommu_snp_shutdown = 1; 1432 1433 /* Free the memory used for caching the certificate data */ 1434 kfree(sev->snp_certs_data); 1435 sev->snp_certs_data = NULL; 1436 1437 /* SHUTDOWN requires the DF_FLUSH */ 1438 wbinvd_on_all_cpus(); 1439 __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL); 1440 1441 ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); 1442 if (ret) { 1443 dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n"); 1444 return ret; 1445 } 1446 1447 sev->snp_inited = false; 1448 dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n"); 1449 1450 return ret; 1451} 1452 1453static int sev_snp_shutdown(int *error) 1454{ 1455 int rc; 1456 1457 mutex_lock(&sev_cmd_mutex); 1458 rc = __sev_snp_shutdown_locked(NULL); 1459 mutex_unlock(&sev_cmd_mutex); 1460 1461 return rc; 1462} 1463 1464static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) 1465{ 1466 struct sev_device *sev = psp_master->sev_data; 1467 struct sev_user_data_pek_cert_import input; 1468 struct sev_data_pek_cert_import data; 1469 void *pek_blob, *oca_blob; 1470 int ret; 1471 1472 if (!writable) 1473 return -EPERM; 1474 1475 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1476 return -EFAULT; 1477 1478 /* copy PEK certificate blobs from userspace */ 1479 pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); 1480 if (IS_ERR(pek_blob)) 1481 return PTR_ERR(pek_blob); 1482 1483 data.reserved = 0; 1484 data.pek_cert_address = __psp_pa(pek_blob); 1485 data.pek_cert_len = input.pek_cert_len; 1486 1487 /* copy PEK certificate blobs from userspace */ 1488 oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); 1489 if (IS_ERR(oca_blob)) { 1490 ret = PTR_ERR(oca_blob); 1491 goto e_free_pek; 1492 } 1493 1494 data.oca_cert_address = __psp_pa(oca_blob); 1495 data.oca_cert_len = input.oca_cert_len; 1496 1497 /* If platform is not in INIT state then transition it to INIT */ 1498 if (sev->state != SEV_STATE_INIT) { 1499 ret = __sev_platform_init_locked(&argp->error); 1500 if (ret) 1501 goto e_free_oca; 1502 } 1503 1504 ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); 1505 1506e_free_oca: 1507 kfree(oca_blob); 1508e_free_pek: 1509 kfree(pek_blob); 1510 return ret; 1511} 1512 1513static int sev_ioctl_do_get_id2(struct 
sev_issue_cmd *argp) 1514{ 1515 struct sev_user_data_get_id2 input; 1516 struct sev_data_get_id data; 1517 void __user *input_address; 1518 void *id_blob = NULL; 1519 int ret; 1520 1521 /* SEV GET_ID is available from SEV API v0.16 and up */ 1522 if (!sev_version_greater_or_equal(0, 16)) 1523 return -ENOTSUPP; 1524 1525 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1526 return -EFAULT; 1527 1528 input_address = (void __user *)input.address; 1529 1530 if (input.address && input.length) { 1531 id_blob = kmalloc(input.length, GFP_KERNEL); 1532 if (!id_blob) 1533 return -ENOMEM; 1534 1535 data.address = __psp_pa(id_blob); 1536 data.len = input.length; 1537 } else { 1538 data.address = 0; 1539 data.len = 0; 1540 } 1541 1542 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error); 1543 1544 /* 1545 * Firmware will return the length of the ID value (either the minimum 1546 * required length or the actual length written), return it to the user. 1547 */ 1548 input.length = data.len; 1549 1550 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 1551 ret = -EFAULT; 1552 goto e_free; 1553 } 1554 1555 if (id_blob) { 1556 if (copy_to_user(input_address, id_blob, data.len)) { 1557 ret = -EFAULT; 1558 goto e_free; 1559 } 1560 } 1561 1562e_free: 1563 kfree(id_blob); 1564 1565 return ret; 1566} 1567 1568static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) 1569{ 1570 struct sev_data_get_id *data; 1571 u64 data_size, user_size; 1572 void *id_blob, *mem; 1573 int ret; 1574 1575 /* SEV GET_ID available from SEV API v0.16 and up */ 1576 if (!sev_version_greater_or_equal(0, 16)) 1577 return -ENOTSUPP; 1578 1579 /* SEV FW expects the buffer it fills with the ID to be 1580 * 8-byte aligned. Memory allocated should be enough to 1581 * hold data structure + alignment padding + memory 1582 * where SEV FW writes the ID. 1583 */ 1584 data_size = ALIGN(sizeof(struct sev_data_get_id), 8); 1585 user_size = sizeof(struct sev_user_data_get_id); 1586 1587 mem = kzalloc(data_size + user_size, GFP_KERNEL); 1588 if (!mem) 1589 return -ENOMEM; 1590 1591 data = mem; 1592 id_blob = mem + data_size; 1593 1594 data->address = __psp_pa(id_blob); 1595 data->len = user_size; 1596 1597 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); 1598 if (!ret) { 1599 if (copy_to_user((void __user *)argp->data, id_blob, data->len)) 1600 ret = -EFAULT; 1601 } 1602 1603 kfree(mem); 1604 1605 return ret; 1606} 1607 1608static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) 1609{ 1610 struct sev_device *sev = psp_master->sev_data; 1611 struct sev_user_data_pdh_cert_export input; 1612 void *pdh_blob = NULL, *cert_blob = NULL; 1613 struct sev_data_pdh_cert_export data; 1614 void __user *input_cert_chain_address; 1615 void __user *input_pdh_cert_address; 1616 int ret; 1617 1618 /* If platform is not in INIT state then transition it to INIT. */ 1619 if (sev->state != SEV_STATE_INIT) { 1620 if (!writable) 1621 return -EPERM; 1622 1623 ret = __sev_platform_init_locked(&argp->error); 1624 if (ret) 1625 return ret; 1626 } 1627 1628 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1629 return -EFAULT; 1630 1631 memset(&data, 0, sizeof(data)); 1632 1633 /* Userspace wants to query the certificate length. 
*/ 1634 if (!input.pdh_cert_address || 1635 !input.pdh_cert_len || 1636 !input.cert_chain_address) 1637 goto cmd; 1638 1639 input_pdh_cert_address = (void __user *)input.pdh_cert_address; 1640 input_cert_chain_address = (void __user *)input.cert_chain_address; 1641 1642 /* Allocate a physically contiguous buffer to store the PDH blob. */ 1643 if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) 1644 return -EFAULT; 1645 1646 /* Allocate a physically contiguous buffer to store the cert chain blob. */ 1647 if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) 1648 return -EFAULT; 1649 1650 pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); 1651 if (!pdh_blob) 1652 return -ENOMEM; 1653 1654 data.pdh_cert_address = __psp_pa(pdh_blob); 1655 data.pdh_cert_len = input.pdh_cert_len; 1656 1657 cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); 1658 if (!cert_blob) { 1659 ret = -ENOMEM; 1660 goto e_free_pdh; 1661 } 1662 1663 data.cert_chain_address = __psp_pa(cert_blob); 1664 data.cert_chain_len = input.cert_chain_len; 1665 1666cmd: 1667 ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); 1668 1669 /* If we query the length, FW responded with expected data. */ 1670 input.cert_chain_len = data.cert_chain_len; 1671 input.pdh_cert_len = data.pdh_cert_len; 1672 1673 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { 1674 ret = -EFAULT; 1675 goto e_free_cert; 1676 } 1677 1678 if (pdh_blob) { 1679 if (copy_to_user(input_pdh_cert_address, 1680 pdh_blob, input.pdh_cert_len)) { 1681 ret = -EFAULT; 1682 goto e_free_cert; 1683 } 1684 } 1685 1686 if (cert_blob) { 1687 if (copy_to_user(input_cert_chain_address, 1688 cert_blob, input.cert_chain_len)) 1689 ret = -EFAULT; 1690 } 1691 1692e_free_cert: 1693 kfree(cert_blob); 1694e_free_pdh: 1695 kfree(pdh_blob); 1696 return ret; 1697} 1698 1699static int sev_ioctl_snp_platform_status(struct sev_issue_cmd *argp) 1700{ 1701 struct sev_device *sev = psp_master->sev_data; 1702 struct sev_data_snp_platform_status_buf buf; 1703 struct page *status_page; 1704 void *data; 1705 int ret; 1706 1707 if (!sev->snp_inited || !argp->data) 1708 return -EINVAL; 1709 1710 status_page = alloc_page(GFP_KERNEL_ACCOUNT); 1711 if (!status_page) 1712 return -ENOMEM; 1713 1714 data = page_address(status_page); 1715 if (snp_set_rmp_state(__pa(data), 1, true, true, false)) { 1716 __free_pages(status_page, 0); 1717 return -EFAULT; 1718 } 1719 1720 buf.status_paddr = __psp_pa(data); 1721 ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error); 1722 1723 /* Change the page state before accessing it */ 1724 if (snp_set_rmp_state(__pa(data), 1, false, true, true)) { 1725 snp_leak_pages(__pa(data) >> PAGE_SHIFT, 1); 1726 return -EFAULT; 1727 } 1728 1729 if (ret) 1730 goto cleanup; 1731 1732 if (copy_to_user((void __user *)argp->data, data, 1733 sizeof(struct sev_user_data_snp_status))) 1734 ret = -EFAULT; 1735 1736cleanup: 1737 __free_pages(status_page, 0); 1738 return ret; 1739} 1740 1741static int sev_ioctl_snp_get_config(struct sev_issue_cmd *argp) 1742{ 1743 struct sev_device *sev = psp_master->sev_data; 1744 struct sev_user_data_ext_snp_config input; 1745 int ret; 1746 1747 if (!sev->snp_inited || !argp->data) 1748 return -EINVAL; 1749 1750 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1751 return -EFAULT; 1752 1753 /* Copy the TCB version programmed through the SET_CONFIG to userspace */ 1754 if (input.config_address) { 1755 if (copy_to_user((void * __user)input.config_address, 1756 &sev->snp_config, sizeof(struct 
sev_user_data_snp_config))) 1757 return -EFAULT; 1758 } 1759 1760 /* Copy the extended certs programmed through the SNP_SET_CONFIG */ 1761 if (input.certs_address && sev->snp_certs_data) { 1762 if (input.certs_len < sev->snp_certs_len) { 1763 /* Return the certs length to userspace */ 1764 input.certs_len = sev->snp_certs_len; 1765 1766 ret = -ENOSR; 1767 goto e_done; 1768 } 1769 1770 if (copy_to_user((void * __user)input.certs_address, 1771 sev->snp_certs_data, sev->snp_certs_len)) 1772 return -EFAULT; 1773 } 1774 1775 ret = 0; 1776 1777e_done: 1778 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) 1779 ret = -EFAULT; 1780 1781 return ret; 1782} 1783 1784static int sev_ioctl_snp_set_config(struct sev_issue_cmd *argp, bool writable) 1785{ 1786 struct sev_device *sev = psp_master->sev_data; 1787 struct sev_user_data_ext_snp_config input; 1788 struct sev_user_data_snp_config config; 1789 void *certs = NULL; 1790 int ret = 0; 1791 1792 if (!sev->snp_inited || !argp->data) 1793 return -EINVAL; 1794 1795 if (!writable) 1796 return -EPERM; 1797 1798 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) 1799 return -EFAULT; 1800 1801 /* Copy the certs from userspace */ 1802 if (input.certs_address) { 1803 if (!input.certs_len || !IS_ALIGNED(input.certs_len, PAGE_SIZE)) 1804 return -EINVAL; 1805 1806 certs = psp_copy_user_blob(input.certs_address, input.certs_len); 1807 if (IS_ERR(certs)) 1808 return PTR_ERR(certs); 1809 } 1810 1811 /* Issue the PSP command to update the TCB version using the SNP_CONFIG. */ 1812 if (input.config_address) { 1813 if (copy_from_user(&config, 1814 (void __user *)input.config_address, sizeof(config))) { 1815 ret = -EFAULT; 1816 goto e_free; 1817 } 1818 1819 ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); 1820 if (ret) 1821 goto e_free; 1822 1823 memcpy(&sev->snp_config, &config, sizeof(config)); 1824 } 1825 1826 /* 1827 * If the new certs are passed then cache it else free the old certs. 
1828 */ 1829 if (certs) { 1830 kfree(sev->snp_certs_data); 1831 sev->snp_certs_data = certs; 1832 sev->snp_certs_len = input.certs_len; 1833 } else { 1834 kfree(sev->snp_certs_data); 1835 sev->snp_certs_data = NULL; 1836 sev->snp_certs_len = 0; 1837 } 1838 1839 return 0; 1840 1841e_free: 1842 kfree(certs); 1843 return ret; 1844} 1845 1846static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) 1847{ 1848 void __user *argp = (void __user *)arg; 1849 struct sev_issue_cmd input; 1850 int ret = -EFAULT; 1851 bool writable = file->f_mode & FMODE_WRITE; 1852 1853 if (!psp_master || !psp_master->sev_data) 1854 return -ENODEV; 1855 1856 if (ioctl != SEV_ISSUE_CMD) 1857 return -EINVAL; 1858 1859 if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) 1860 return -EFAULT; 1861 1862 if (input.cmd > SEV_MAX) 1863 return -EINVAL; 1864 1865 mutex_lock(&sev_cmd_mutex); 1866 1867 switch (input.cmd) { 1868 1869 case SEV_FACTORY_RESET: 1870 ret = sev_ioctl_do_reset(&input, writable); 1871 break; 1872 case SEV_PLATFORM_STATUS: 1873 ret = sev_ioctl_do_platform_status(&input); 1874 break; 1875 case SEV_PEK_GEN: 1876 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable); 1877 break; 1878 case SEV_PDH_GEN: 1879 ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable); 1880 break; 1881 case SEV_PEK_CSR: 1882 ret = sev_ioctl_do_pek_csr(&input, writable); 1883 break; 1884 case SEV_PEK_CERT_IMPORT: 1885 ret = sev_ioctl_do_pek_import(&input, writable); 1886 break; 1887 case SEV_PDH_CERT_EXPORT: 1888 ret = sev_ioctl_do_pdh_export(&input, writable); 1889 break; 1890 case SEV_GET_ID: 1891 pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); 1892 ret = sev_ioctl_do_get_id(&input); 1893 break; 1894 case SEV_GET_ID2: 1895 ret = sev_ioctl_do_get_id2(&input); 1896 break; 1897 case SNP_PLATFORM_STATUS: 1898 ret = sev_ioctl_snp_platform_status(&input); 1899 break; 1900 case SNP_SET_EXT_CONFIG: 1901 ret = sev_ioctl_snp_set_config(&input, writable); 1902 break; 1903 case SNP_GET_EXT_CONFIG: 1904 ret = sev_ioctl_snp_get_config(&input); 1905 break; 1906 default: 1907 ret = -EINVAL; 1908 goto out; 1909 } 1910 1911 if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) 1912 ret = -EFAULT; 1913out: 1914 mutex_unlock(&sev_cmd_mutex); 1915 1916 return ret; 1917} 1918 1919static const struct file_operations sev_fops = { 1920 .owner = THIS_MODULE, 1921 .unlocked_ioctl = sev_ioctl, 1922}; 1923 1924int sev_platform_status(struct sev_user_data_status *data, int *error) 1925{ 1926 return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); 1927} 1928EXPORT_SYMBOL_GPL(sev_platform_status); 1929 1930int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) 1931{ 1932 return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); 1933} 1934EXPORT_SYMBOL_GPL(sev_guest_deactivate); 1935 1936int sev_guest_activate(struct sev_data_activate *data, int *error) 1937{ 1938 return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); 1939} 1940EXPORT_SYMBOL_GPL(sev_guest_activate); 1941 1942int sev_guest_decommission(struct sev_data_decommission *data, int *error) 1943{ 1944 return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); 1945} 1946EXPORT_SYMBOL_GPL(sev_guest_decommission); 1947 1948int sev_guest_df_flush(int *error) 1949{ 1950 return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); 1951} 1952EXPORT_SYMBOL_GPL(sev_guest_df_flush); 1953 1954int snp_guest_decommission(struct sev_data_snp_decommission *data, int *error) 1955{ 1956 return sev_do_cmd(SEV_CMD_SNP_DECOMMISSION, data, error); 1957} 
1958EXPORT_SYMBOL_GPL(snp_guest_decommission); 1959 1960int snp_guest_df_flush(int *error) 1961{ 1962 return sev_do_cmd(SEV_CMD_SNP_DF_FLUSH, NULL, error); 1963} 1964EXPORT_SYMBOL_GPL(snp_guest_df_flush); 1965 1966int snp_guest_page_reclaim(struct sev_data_snp_page_reclaim *data, int *error) 1967{ 1968 return sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, data, error); 1969} 1970EXPORT_SYMBOL_GPL(snp_guest_page_reclaim); 1971 1972int snp_guest_dbg_decrypt_page(u64 gctx_pfn, u64 src_pfn, u64 dst_pfn, int *error) 1973{ 1974 struct sev_data_snp_dbg data = {0}; 1975 struct sev_device *sev; 1976 int ret; 1977 1978 if (!psp_master || !psp_master->sev_data) 1979 return -ENODEV; 1980 1981 sev = psp_master->sev_data; 1982 1983 if (!sev->snp_inited) 1984 return -EINVAL; 1985 1986 data.gctx_paddr = sme_me_mask | (gctx_pfn << PAGE_SHIFT); 1987 data.src_addr = sme_me_mask | (src_pfn << PAGE_SHIFT); 1988 data.dst_addr = sme_me_mask | (dst_pfn << PAGE_SHIFT); 1989 data.len = PAGE_SIZE; 1990 1991 /* The destination page must be in the firmware state. */ 1992 if (snp_set_rmp_state(data.dst_addr, 1, true, false, false)) 1993 return -EIO; 1994 1995 ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &data, error); 1996 1997 /* Restore the page state */ 1998 if (snp_set_rmp_state(data.dst_addr, 1, false, false, true)) 1999 ret = -EIO; 2000 2001 return ret; 2002} 2003EXPORT_SYMBOL_GPL(snp_guest_dbg_decrypt_page); 2004 2005int snp_guest_ext_guest_request(struct sev_data_snp_guest_request *data, 2006 unsigned long vaddr, unsigned long *npages, unsigned long *fw_err) 2007{ 2008 unsigned long expected_npages; 2009 struct sev_device *sev; 2010 int rc; 2011 2012 if (!psp_master || !psp_master->sev_data) 2013 return -ENODEV; 2014 2015 sev = psp_master->sev_data; 2016 2017 if (!sev->snp_inited) 2018 return -EINVAL; 2019 2020 /* 2021 * Check if there is enough space to copy the certificate chain. Otherwise 2022 * return ERROR code defined in the GHCB specification. 2023 */ 2024 expected_npages = sev->snp_certs_len >> PAGE_SHIFT; 2025 if (*npages < expected_npages) { 2026 *npages = expected_npages; 2027 *fw_err = SNP_GUEST_REQ_INVALID_LEN; 2028 return -EINVAL; 2029 } 2030 2031 rc = sev_do_cmd(SEV_CMD_SNP_GUEST_REQUEST, data, (int *)&fw_err); 2032 if (rc) 2033 return rc; 2034 2035 /* Copy the certificate blob */ 2036 if (sev->snp_certs_data) { 2037 *npages = expected_npages; 2038 memcpy((void *)vaddr, sev->snp_certs_data, *npages << PAGE_SHIFT); 2039 } else { 2040 *npages = 0; 2041 } 2042 2043 return rc; 2044} 2045EXPORT_SYMBOL_GPL(snp_guest_ext_guest_request); 2046 2047static void sev_exit(struct kref *ref) 2048{ 2049 misc_deregister(&misc_dev->misc); 2050 kfree(misc_dev); 2051 misc_dev = NULL; 2052} 2053 2054static int sev_misc_init(struct sev_device *sev) 2055{ 2056 struct device *dev = sev->dev; 2057 int ret; 2058 2059 /* 2060 * SEV feature support can be detected on multiple devices but the SEV 2061 * FW commands must be issued on the master. During probe, we do not 2062 * know the master hence we create /dev/sev on the first device probe. 2063 * sev_do_cmd() finds the right master device to which to issue the 2064 * command to the firmware. 
         */
        if (!misc_dev) {
                struct miscdevice *misc;

                misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
                if (!misc_dev)
                        return -ENOMEM;

                misc = &misc_dev->misc;
                misc->minor = MISC_DYNAMIC_MINOR;
                misc->name = DEVICE_NAME;
                misc->fops = &sev_fops;

                ret = misc_register(misc);
                if (ret)
                        return ret;

                kref_init(&misc_dev->refcount);
        } else {
                kref_get(&misc_dev->refcount);
        }

        init_waitqueue_head(&sev->int_queue);
        sev->misc = misc_dev;
        dev_dbg(dev, "registered SEV device\n");

        return 0;
}

int sev_dev_init(struct psp_device *psp)
{
        struct device *dev = psp->dev;
        struct sev_device *sev;
        int ret = -ENOMEM;

        if (!boot_cpu_has(X86_FEATURE_SEV)) {
                dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
                return 0;
        }

        sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
        if (!sev)
                goto e_err;

        sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
        if (!sev->cmd_buf)
                goto e_sev;

        sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;

        psp->sev_data = sev;

        sev->dev = dev;
        sev->psp = psp;

        sev->io_regs = psp->io_regs;

        sev->vdata = (struct sev_vdata *)psp->vdata->sev;
        if (!sev->vdata) {
                ret = -ENODEV;
                dev_err(dev, "sev: missing driver data\n");
                goto e_buf;
        }

        psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

        ret = sev_misc_init(sev);
        if (ret)
                goto e_irq;

        dev_notice(dev, "sev enabled\n");

        return 0;

e_irq:
        psp_clear_sev_irq_handler(psp);
e_buf:
        devm_free_pages(dev, (unsigned long)sev->cmd_buf);
e_sev:
        devm_kfree(dev, sev);
e_err:
        psp->sev_data = NULL;

        dev_notice(dev, "sev initialization failed\n");

        return ret;
}

static void sev_firmware_shutdown(struct sev_device *sev)
{
        sev_platform_shutdown(NULL);

        if (sev_es_tmr) {
                /* The TMR area was encrypted, flush it from the cache */
                wbinvd_on_all_cpus();

                __snp_free_firmware_pages(virt_to_page(sev_es_tmr),
                                          get_order(sev_es_tmr_size),
                                          false);
                sev_es_tmr = NULL;
        }

        if (sev_init_ex_buffer) {
                free_pages((unsigned long)sev_init_ex_buffer,
                           get_order(NV_LENGTH));
                sev_init_ex_buffer = NULL;
        }

        if (snp_range_list) {
                free_pages((unsigned long)snp_range_list,
                           get_order(PAGE_SIZE));
                snp_range_list = NULL;
        }

        /*
         * The host map needs its immutable bit cleared, so it must be freed
         * before the SNP firmware shutdown.
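         * (These are the intermediate buffers used to handle legacy SEV
         * commands while SNP is active; reclaiming them is assumed to be
         * possible only while the SNP firmware is still initialized, hence
         * the ordering below.)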
         */
        free_snp_host_map(sev);

        sev_snp_shutdown(NULL);
}

void sev_dev_destroy(struct psp_device *psp)
{
        struct sev_device *sev = psp->sev_data;

        if (!sev)
                return;

        sev_firmware_shutdown(sev);

        if (sev->misc)
                kref_put(&misc_dev->refcount, sev_exit);

        psp_clear_sev_irq_handler(psp);
}

int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
                                void *data, int *error)
{
        if (!filep || filep->f_op != &sev_fops)
                return -EBADF;

        return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);

void sev_pci_init(void)
{
        struct sev_device *sev = psp_master->sev_data;
        int error, rc;

        if (!sev)
                return;

        psp_timeout = psp_probe_timeout;

        if (sev_get_api_version())
                goto err;

        if (sev_version_greater_or_equal(0, 15) &&
            sev_update_firmware(sev->dev) == 0)
                sev_get_api_version();

        /*
         * If an init_ex_path is provided, rely on INIT_EX for PSP
         * initialization instead of INIT.
         */
        if (init_ex_path) {
                sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
                if (!sev_init_ex_buffer) {
                        dev_err(sev->dev,
                                "SEV: INIT_EX NV memory allocation failed\n");
                        goto err;
                }
        }

        /*
         * If the boot CPU supports SNP, first attempt to initialize the SNP
         * firmware.
         */
        if (cpu_feature_enabled(X86_FEATURE_SEV_SNP)) {
                if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
                        dev_err(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
                                SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
                } else {
                        rc = sev_snp_init(&error);
                        if (rc) {
                                /*
                                 * If SNP INIT fails, don't abort the probe.
                                 * Continue to initialize the legacy SEV firmware.
                                 */
                                dev_err(sev->dev, "SEV-SNP: failed to INIT error %#x\n", error);
                        }
                }

                /*
                 * Allocate the intermediate buffers used for the legacy
                 * command handling.
                 */
                if (alloc_snp_host_map(sev)) {
                        dev_notice(sev->dev, "Failed to alloc host map (disabling legacy SEV)\n");
                        goto skip_legacy;
                }
        }

        /* Obtain the TMR memory area for SEV-ES use */
        sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
        if (!sev_es_tmr)
                dev_warn(sev->dev,
                         "SEV: TMR allocation failed, SEV-ES support unavailable\n");

        if (!psp_init_on_probe)
                return;

        /* Initialize the platform */
        rc = sev_platform_init(&error);
        if (rc)
                dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
                        error, rc);

skip_legacy:
        dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_inited ?
                "-SNP" : "", sev->api_major, sev->api_minor, sev->build);

        return;

err:
        free_snp_host_map(sev);
        psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
        struct sev_device *sev = psp_master->sev_data;

        if (!sev)
                return;

        sev_firmware_shutdown(sev);
}
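
/*
 * Userspace reaches this driver only through the SEV_ISSUE_CMD ioctl on
 * /dev/sev, handled by sev_ioctl() above.  A minimal, illustrative userspace
 * sketch (not part of this file; structures and constants per the
 * <linux/psp-sev.h> UAPI header) that queries the platform status:
 *
 *        #include <stdio.h>
 *        #include <fcntl.h>
 *        #include <sys/ioctl.h>
 *        #include <linux/psp-sev.h>
 *
 *        int main(void)
 *        {
 *                struct sev_user_data_status status = {};
 *                struct sev_issue_cmd arg = {
 *                        .cmd  = SEV_PLATFORM_STATUS,
 *                        .data = (__u64)(unsigned long)&status,
 *                };
 *                int fd = open("/dev/sev", O_RDONLY);
 *
 *                if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &arg))
 *                        return 1;   // arg.error carries the FW status code
 *
 *                printf("SEV API %u.%u build %u\n", status.api_major,
 *                       status.api_minor, status.build);
 *                return 0;
 *        }
 *
 * Commands that mutate platform state (e.g. SEV_FACTORY_RESET) additionally
 * require the file to be opened writable, as checked via FMODE_WRITE above.
 */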