cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arm-smmu-v3-sva.c (13624B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
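/*
 * Returns the existing shared CD if this mm already has one, NULL once the
 * ASID is free for the caller to take, or an ERR_PTR on failure.
 */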
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

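	/*
	 * The ASID is held by a private (non-SVA) domain, whose CD is
	 * embedded in its s1_cfg; recover the owning domain so it can be
	 * migrated to a fresh ASID.
	 */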
	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

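	/*
	 * Build a CD that mirrors the CPU's TTBR0 translation regime: input
	 * size from vabits_actual, WBWA cacheability, inner-shareable, and
	 * TTBR1 walks disabled via EPD1, so the SMMU can walk the mm's page
	 * tables directly.
	 */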
	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

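	/*
	 * TG0 selects the SMMU's translation granule, which has to match the
	 * kernel's PAGE_SIZE because the page tables are shared.
	 */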
	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

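	/* Cap the output address size at the CPU's sanitised PARANGE */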
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * The MAIR value is pretty much constant and global, so we can just
	 * get it from the current CPU register.
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}

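/*
 * mmu_notifier callbacks: mirror CPU page-table updates into the SMMU TLBs
 * and the device ATCs so that DMA never uses stale translations.
 */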
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * mm_types defines vm_end as the first byte after the end address,
	 * whereas the IOMMU subsystem uses the last address of a range. Do a
	 * simple translation here by computing the invalidation size.
	 */
	size = end - start;

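	/*
	 * With broadcast TLB maintenance (BTM) the SMMU already snoops the
	 * CPU's TLB invalidations, so only the device ATCs need an explicit
	 * invalidation here.
	 */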
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
					    PAGE_SIZE, false, smmu_domain);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.invalidate_range	= arm_smmu_mm_invalidate_range,
	.release		= arm_smmu_mm_release,
	.free_notifier		= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

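	/*
	 * Registering the notifier pins the mm; ->release() runs when the
	 * address space goes away, and ->free_notifier() frees smmu_mn once
	 * the final mmu_notifier_put() has gone through.
	 */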
	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	/* Allocate a PASID for this mm if necessary */
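	/* SSID 0 carries the default (non-SVA) context, so PASIDs start at 1 */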
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
	if (ret)
		goto err_free_bond;

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *handle;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);
	return handle;
}

void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	mutex_lock(&sva_lock);
	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	return bond->mm->pasid;
}

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

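	/* The SMMU must support the CPU's page size to share its page tables */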
	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
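	/* kpti allocates ASIDs in user/kernel pairs, halving the usable space */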
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}