xen_pt_msi.c (18418B)
/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"

#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
#include "hw/i386/apic-msidef.h"


#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID     0
#define XEN_PT_GFLAGS_SHIFT_RH          8
#define XEN_PT_GFLAGS_SHIFT_DM          9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE   12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE     15
#define XEN_PT_GFLAGSSHIFT_UNMASKED     16

#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]

/*
 * Helpers
 */

static inline uint8_t msi_vector(uint32_t data)
{
    return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
}

static inline uint8_t msi_dest_id(uint32_t addr)
{
    return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
}

static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & 0xffffff00;
}

static uint32_t msi_gflags(uint32_t data, uint64_t addr)
{
    uint32_t result = 0;
    int rh, dm, dest_id, deliv_mode, trig_mode;

    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
    dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    dest_id = msi_dest_id(addr);
    deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

    result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
        | (dm << XEN_PT_GFLAGS_SHIFT_DM)
        | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
        | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);

    return result;
}

static inline uint64_t msi_addr64(XenPTMSI *msi)
{
    return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
}

static int msi_msix_enable(XenPCIPassthroughState *s,
                           uint32_t address,
                           uint16_t flag,
                           bool enable)
{
    uint16_t val = 0;
    int rc;

    if (!address) {
        return -1;
    }

    rc = xen_host_pci_get_word(&s->real_device, address, &val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
        return rc;
    }
    if (enable) {
        val |= flag;
    } else {
        val &= ~flag;
    }
    rc = xen_host_pci_set_word(&s->real_device, address, val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
    }
    return rc;
}

static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    assert((!is_msix && msix_entry == 0) || is_msix);

    if (xen_is_pirq_msi(data)) {
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /* this probably identifies an misconfiguration of the guest,
             * try the emulated path */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: 0x%x, entry: 0x%x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s (err: %i, vec: 0x%x, entry 0x%x)\n",
                       is_msix ? "-X" : "", errno, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq,
                           bool masked)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec 0x%x gflags 0x%x"
               " (entry: 0x%x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        table_addr = s->msix->mmio_base_addr;
    }

    gflags |= masked ? 0 : (1u << XEN_PT_GFLAGSSHIFT_UNMASKED);

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);

    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n",
                   is_msix ? "-X" : "", errno);

        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
                       is_msix ? "-X" : "", *old_pirq, errno);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}

static int msi_msix_disable(XenPCIPassthroughState *s,
                            uint64_t addr,
                            uint32_t data,
                            int pirq,
                            bool is_msix,
                            bool is_binded)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;

    if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
        return 0;
    }

    if (is_binded) {
        XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec 0x%x\n",
                   is_msix ? "-X" : "", pirq, gvec);
        rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
        if (rc) {
            XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: 0x%x)\n",
                       is_msix ? "-X" : "", errno, pirq, gvec);
            return rc;
        }
    }

    XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
    rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
    if (rc) {
        XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
                   is_msix ? "-X" : "", pirq, errno);
        return rc;
    }

    return 0;
}

/*
 * MSI virtualization functions
 */

static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
{
    XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");

    if (!s->msi) {
        return -1;
    }

    return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
                           enable);
}

/* setup physical msi, but don't enable it */
int xen_pt_msi_setup(XenPCIPassthroughState *s)
{
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    int rc = 0;
    XenPTMSI *msi = s->msi;

    if (msi->initialized) {
        XEN_PT_ERR(&s->dev,
                   "Setup physical MSI when it has been properly initialized.\n");
        return -1;
    }

    rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
    if (rc) {
        return rc;
    }

    if (pirq < 0) {
        XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
        return -1;
    }

    msi->pirq = pirq;
    XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);

    return 0;
}

int xen_pt_msi_update(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    /* Current MSI emulation in QEMU only supports 1 vector */
    return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
                           false, 0, &msi->pirq, msi->mask & 1);
}

void xen_pt_msi_disable(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    if (!msi) {
        return;
    }

    (void)xen_pt_msi_set_enable(s, false);

    msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
                     msi->initialized);

    /* clear msi info */
    msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    msi->initialized = false;
    msi->mapped = false;
    msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
}

/*
 * MSI-X virtualization functions
 */

static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
{
    XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");

    if (!s->msix) {
        return -1;
    }

    return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
                           enabled);
}

static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr,
                                  uint32_t vec_ctrl)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    /*
     * Update the entry addr and data to the latest values only when the
     * entry is masked or they are all masked, as required by the spec.
     * Addr and data changes while the MSI-X entry is unmasked get deferred
     * until the next masked -> unmasked transition.
     */
    if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall ||
        (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        entry->addr = entry->latch(LOWER_ADDR) |
                      ((uint64_t)entry->latch(UPPER_ADDR) << 32);
        entry->data = entry->latch(DATA);
    }

    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq,
                         vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

    if (!rc) {
        entry->updated = false;
    }

    return rc;
}

int xen_pt_msix_update(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;
    int i;

    for (i = 0; i < msix->total_entries; i++) {
        xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL));
    }

    return 0;
}

void xen_pt_msix_disable(XenPCIPassthroughState *s)
{
    int i = 0;

    msix_set_enable(s, false);

    for (i = 0; i < s->msix->total_entries; i++) {
        XenPTMSIXEntry *entry = &s->msix->msix_entry[i];

        msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);

        /* clear MSI-X info */
        entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
        entry->updated = false;
    }
}

int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
{
    XenPTMSIXEntry *entry;
    int i, ret;

    if (!(s->msix && s->msix->bar_index == bar_index)) {
        return 0;
    }

    for (i = 0; i < s->msix->total_entries; i++) {
        entry = &s->msix->msix_entry[i];
        if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
                                          PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
            if (ret) {
                XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n",
                           entry->pirq, errno);
            }
            entry->updated = true;
        }
    }
    return xen_pt_msix_update(s);
}

static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
{
    assert(!(offset % sizeof(*e->latch)));
    return e->latch[offset / sizeof(*e->latch)];
}

static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
{
    assert(!(offset % sizeof(*e->latch)));
    e->latch[offset / sizeof(*e->latch)] = val;
}

static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    unsigned int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr >= msix->total_entries) {
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        if (get_entry_value(entry, offset) == val
            && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            return;
        }

        entry->updated = true;
    } else if (msix->enabled && entry->updated &&
               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        const volatile uint32_t *vec_ctrl;

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
                   + PCI_MSIX_ENTRY_VECTOR_CTRL;
        xen_pt_msix_update_one(s, entry_nr, *vec_ctrl);
    }

    set_entry_value(entry, offset, val);
}

static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}

static bool pci_msix_accepts(void *opaque, hwaddr addr,
                             unsigned size, bool is_write,
                             MemTxAttrs attrs)
{
    return !(addr & (size - 1));
}

static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
        .accepts = pci_msix_accepts
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false
    }
};

int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id 0x%x base 0x%x\n", id, base);
        return -1;
    }

    rc = xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_FLAGS field\n");
        return rc;
    }
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    s->msix = g_malloc0(sizeof (XenPTMSIX)
                        + total_entries * sizeof (XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops,
                          s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    rc = xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_TABLE field\n");
        goto error_out;
    }
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = 0x%x, total_entries = %d\n",
               table_off, total_entries);
    msix->table_offset_adjust = table_off & 0x0fff;
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;

    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}

void xen_pt_msix_unmap(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    /* unmap the MSI-X memory mapped register area */
    if (msix->phys_iomem_base) {
        XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
                   msix->phys_iomem_base);
        munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
               + msix->table_offset_adjust);
    }

    memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
}

void xen_pt_msix_delete(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    object_unparent(OBJECT(&msix->mmio));

    g_free(s->msix);
    s->msix = NULL;
}
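Note: the gflags word that msi_msix_update() hands to xc_domain_update_msi_irq() is just the guest-programmed MSI address/data fields repacked into one 32-bit value using the XEN_PT_GFLAGS_* shifts defined at the top of the file, with the UNMASKED bit ORed in when the vector is not masked. The standalone sketch below is illustrative only and is not part of xen_pt_msi.c; the shift definitions are copied from the file, while the file name and the sample field values are hypothetical.

/* gflags_demo.c - minimal sketch of the gflags packing performed by
 * msi_gflags() plus the UNMASKED bit added in msi_msix_update().
 * Build with: cc -o gflags_demo gflags_demo.c
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* shift counts copied from xen_pt_msi.c */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID     0
#define XEN_PT_GFLAGS_SHIFT_RH          8
#define XEN_PT_GFLAGS_SHIFT_DM          9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE   12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE     15
#define XEN_PT_GFLAGSSHIFT_UNMASKED     16

int main(void)
{
    /* hypothetical example: LAPIC destination 0x02, no redirection hint,
     * physical destination mode, fixed delivery, edge trigger, unmasked */
    uint32_t dest_id = 0x02, rh = 0, dm = 0, deliv_mode = 0, trig_mode = 0;
    int masked = 0;

    uint32_t gflags = (dest_id << XEN_PT_GFLAGS_SHIFT_DEST_ID)
                    | (rh << XEN_PT_GFLAGS_SHIFT_RH)
                    | (dm << XEN_PT_GFLAGS_SHIFT_DM)
                    | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
                    | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE)
                    | (masked ? 0 : (1u << XEN_PT_GFLAGSSHIFT_UNMASKED));

    /* prints "gflags = 0x10002" for the values above */
    printf("gflags = 0x%" PRIx32 "\n", gflags);
    return 0;
}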