pasid.c
// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}
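/*
 * Illustrative sketch only, not part of this file: a virtual-command
 * capable vIOMMU backend would pair the two helpers above roughly as
 * follows (the surrounding error handling is hypothetical):
 *
 *	u32 pasid;
 *	int ret;
 *
 *	ret = vcmd_alloc_pasid(iommu, &pasid);	// hardware picks the PASID
 *	if (ret)
 *		return ret;			// -ENOSPC or -ENODEV
 *	...use pasid...
 *	vcmd_free_pasid(iommu, pasid);		// return it to the pool
 */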
/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}
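/*
 * A rough worked example of the sizing math in intel_pasid_alloc_table(),
 * assuming PASID_PDE_SHIFT == 6 (one 8-byte directory entry per 64
 * PASIDs) and PAGE_SHIFT == 12:
 *
 *	max_pasid = 1 << 20			20-bit PASID space
 *	size      = max_pasid >> (6 - 3)	= 128 KiB of directory
 *	order     = get_order(128 KiB)		= 5 (32 pages)
 *
 * The reverse mapping stored in the table, 1 << (order + PAGE_SHIFT + 3),
 * then gives back 1 << 20 coverable PASIDs.
 */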
struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation, so there is no race with free and clear.
		 * However, this entry might be populated by someone else
		 * while we are preparing it. If so, drop our page and
		 * retry to pick up theirs.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}
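/*
 * Illustrative only: the two-level lookup above splits a PASID into a
 * directory index and a table index. Assuming PASID_PDE_SHIFT == 6 and
 * PASID_PTE_MASK == 0x3f, e.g. for pasid 0x1234:
 *
 *	dir_index = 0x1234 >> 6   = 0x48	selects the directory entry
 *	index     = 0x1234 & 0x3f = 0x34	selects the PASID entry within
 *						that 64-entry leaf table
 */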
/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID (Domain Identifier) field (Bit 64~79) of a scalable
 * mode PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get the domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR (Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW (Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT (PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD (Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE (Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE (Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P (Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup the Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page Table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
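/*
 * Orientation for the helpers above: a scalable mode PASID entry is
 * 512 bits held as eight 64-bit words, so spec bit N lives in
 * val[N / 64] at bit position (N % 64). For example:
 *
 *	DID,   bits 64~79   -> val[1], bits 0~15
 *	PWSNP, bit 87       -> val[1], bit 23
 *	SRE,   bit 128      -> val[2], bit 0
 *	WPE,   bit 132      -> val[2], bit 4
 *	FLPTR, bits 140~191 -> val[2], the VTD_PAGE_MASK-aligned bits
 */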
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * PASID 0 indicates RID2PASID (a DMA request without PASID), so
	 * a devTLB flush without PASID should be used. For a non-zero
	 * PASID under SVA usage, the device could do DMA with multiple
	 * PASIDs, and it is more efficient to flush the devTLB entries
	 * specific to the PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	if (!pasid_pte_is_present(pte))
		return;

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);

	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
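/*
 * Descriptive note on the teardown above: caches are invalidated in
 * "walk toward the device" order -- PASID cache first, then the IOTLB
 * (PASID-based for first-level/PT entries, domain-selective otherwise),
 * and the device TLB last -- so no stage can be refilled from one that
 * was already flushed.
 */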
/*
 * This function flushes the caches for a newly set up pasid table
 * entry. The caller should not modify in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

static inline int pasid_enable_wpe(struct pasid_entry *pte)
{
#ifdef CONFIG_X86
	unsigned long cr0 = read_cr0();

	/* CR0.WP is normally set but just to be sure */
	if (unlikely(!(cr0 & X86_CR0_WP))) {
		pr_err_ratelimited("No CPU write protect!\n");
		return -EINVAL;
	}
#endif
	pasid_set_wpe(pte);

	return 0;
}

/*
 * Set up the scalable mode pasid table entry for first-level-only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
		if (pasid_enable_wpe(pte))
			return -EINVAL;
	}

	if (flags & PASID_FLAG_FL5LP) {
		if (cap_5lp_support(iommu->cap)) {
			pasid_set_flpm(pte, 1);
		} else {
			pr_err("No 5-level paging support for first-level\n");
			pasid_clear_entry(pte);
			return -EINVAL;
		}
	}

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip the top levels of the page tables for an IOMMU whose agaw is
 * smaller than the domain's. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second-level-only
 * translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since it is a second level only translation setup, we should
	 * set the SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID)
		pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
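/*
 * Illustrative sketch only (hypothetical caller, not part of this
 * file): an SVA bind path would typically install a first-level entry
 * pointing at the process page table, roughly:
 *
 *	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
 *			did, cpu_feature_enabled(X86_FEATURE_LA57) ?
 *			PASID_FLAG_FL5LP : 0);
 *
 * while nested-translation-free guest and identity setups go through
 * intel_pasid_setup_second_level() and intel_pasid_setup_pass_through()
 * below.
 */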
/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	/* Caller must ensure PASID entry is not in use. */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set the SRE bit as well since the addresses are
	 * expected to be GPAs.
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec 3.4, Table 23 gives the guidance for cache
	 * invalidation:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}