fsl_pamu_domain.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret)
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at geometry setup time.
	 * This is a static value which depends on the type of
	 * device and does not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);
	return ret;
}
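/*
 * Illustrative sketch, not part of the driver: the pamu_* primitives
 * called above live in fsl_pamu.c.  Our reading (an assumption, not
 * confirmed by this file) is that the first pamu_config_ppaace() call
 * programs the operation-mapping index with no access permissions, and
 * the second, with omi = ~(u32)0 meaning "leave OMI untouched", grants
 * query/update rights, so a half-configured entry is never live.
 * Assuming the signatures used above, the sequence for one
 * hypothetical LIODN would be:
 *
 *	int liodn = 42;			// hypothetical value
 *	u32 omi_index = ~(u32)0;	// filled in by get_ome_index()
 *	u32 stash_id = dma_domain->stash_id;
 *
 *	pamu_disable_liodn(liodn);	// quiesce translations first
 *	pamu_config_ppaace(liodn, omi_index, stash_id, 0);
 *	pamu_config_ppaace(liodn, ~(u32)0, stash_id,
 *			   PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
 *	pamu_enable_liodn(liodn);	// done later by the attach path
 */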
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check whether the device is already attached to a domain.
	 * If it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In the case of devices with multiple LIODNs, just store
	 * the info for the first LIODN, as all
	 * LIODNs share the same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
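/*
 * For reference, a sketch of the per-device bookkeeping used by
 * attach_device()/remove_device_ref() above.  struct device_domain_info
 * is declared in fsl_pamu_domain.h; the layout below is inferred from
 * the field accesses in this file, not copied from that header:
 *
 *	struct device_domain_info {
 *		struct list_head link;	// entry in dma_domain->devices
 *		struct device *dev;	// also reachable via dev_iommu_priv
 *		int liodn;
 *		struct fsl_dma_domain *domain;	// owning domain
 *	};
 */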
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;
	return iova;
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}
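/*
 * Usage sketch (illustrative, not part of the driver): consumers such
 * as VFIO reach this driver through the generic IOMMU core rather than
 * by calling the ops above directly.  Assuming a platform device that
 * carries an "fsl,liodn" property, the flow would look roughly like:
 *
 *	struct iommu_domain *dom;
 *	int ret;
 *
 *	dom = iommu_domain_alloc(&platform_bus_type);	// UNMANAGED domain
 *	if (!dom)
 *		return -ENOMEM;
 *	// aperture is fixed to [0, (1ULL << 36) - 1] by domain_alloc
 *	ret = iommu_attach_device(dom, dev);	// -> fsl_pamu_attach_device()
 *	...
 *	iommu_detach_device(dom, dev);
 *	iommu_domain_free(dom);			// -> fsl_pamu_domain_free()
 */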
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to the pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that the LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to the pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}
	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
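/*
 * Example (hypothetical caller): fsl_pamu_configure_l1_stash() is the
 * external hook for directing DMA stashing at a particular CPU's L1
 * cache once the domain has been attached.  A caller holding a domain
 * and a target CPU id might do:
 *
 *	ret = fsl_pamu_configure_l1_stash(domain, cpu);
 *	if (ret)
 *		pr_warn("L1 stashing not enabled for cpu %u: %d\n", cpu, ret);
 */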
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices, so assign a device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity,
		 * so free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static void fsl_pamu_release_device(struct device *dev)
{
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.release_device	= fsl_pamu_release_device,
	.device_group	= fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.detach_dev	= fsl_pamu_detach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}
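/*
 * Illustrative device-tree fragment (hypothetical node and values)
 * showing the "fsl,liodn" property that fsl_pamu_attach_device() and
 * fsl_pamu_device_group() look up above.  For PCI, the property lives
 * on the controller node (pci_ctl->parent) rather than the endpoint,
 * which is why the attach/detach paths redirect "dev" before reading it:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		fsl,liodn = <0x18>;	// hypothetical LIODN
 *	};
 */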