// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new
	 * ASID, which isn't assigned yet. We'll do an invalidate-all on the
	 * old ASID later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update the ASID and invalidate the CD in all associated masters.
	 * There will be some overlap between use of both ASIDs, until we
	 * invalidate the TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

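/*
 * Allocate a context descriptor that shares the CPU page table of @mm: pin
 * the arm64 ASID, make sure no private SMMU context is already using that
 * ASID (via arm_smmu_share_asid()), and fill TTBR, TCR and MAIR from the CPU
 * configuration.
 */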
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	/* Don't free the mm until we release the ASID */
	mmgrab(mm);

	asid = arm64_mm_context_get(mm);
	if (!asid) {
		err = -ESRCH;
		goto out_drop_mm;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * The MAIR value is pretty much constant and global, so we can just
	 * get it from the current CPU register.
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
out_drop_mm:
	mmdrop(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

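/*
 * Drop a reference to a shared context descriptor. On the last reference,
 * unpin the arm64 ASID, release the mm and free the descriptor.
 */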
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		mmdrop(cd->mm);
		kfree(cd);
	}
}

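/*
 * mmu_notifier callback: the CPU invalidated a range of @mm. Mirror it on the
 * SMMU side by flushing the IOTLB for this ASID (unless broadcast TLB
 * maintenance already takes care of it) and the device ATCs.
 */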
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	size_t size;

	/*
	 * The mm defines the end of a range as the first byte after the last
	 * address, whereas the IOMMU subsystem uses the last address of the
	 * range. Do the translation by computing the size here.
	 */
	size = end - start;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
					    PAGE_SIZE, false, smmu_domain);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

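/*
 * mmu_notifier release callback, invoked when the address space exits.
 * Quiesce DMA for this context instead of tearing it down, since devices may
 * still issue transactions.
 */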
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD
	 * events, but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.invalidate_range	= arm_smmu_mm_invalidate_range,
	.release		= arm_smmu_mm_release,
	.free_notifier		= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

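/*
 * Drop a reference taken by arm_smmu_mmu_notifier_get(). On the last put,
 * clear the context descriptor, invalidate any remaining TLB and ATC entries,
 * and free the notifier and shared CD.
 */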
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If arm_smmu_mm_release() ran, it already performed the
	 * invalidation. Otherwise invalidate the TLB and ATC now.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

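/*
 * Bind @mm to @dev, allocating a PASID and an SVA bond if one doesn't exist
 * yet. Called with sva_lock held.
 */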
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	/* Allocate a PASID for this mm if necessary */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
	if (ret)
		goto err_free_bond;

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_bond;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

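/*
 * SVA bind entry point: only stage-1 domains can share CPU page tables. The
 * actual work is done in __arm_smmu_sva_bind() under sva_lock.
 */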
struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *handle;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);
	return handle;
}

void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	mutex_lock(&sva_lock);
	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	return bond->mm->pasid;
}

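/*
 * Check that the SMMU hardware is compatible with the CPU's MMU configuration
 * (coherency, VA/PA sizes, page size, ASID width), so that CPU page tables
 * can be shared with the SMMU as-is.
 */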
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature).
	 * We're not even pretending to support AArch32 here. Abort if the
	 * MMU outputs addresses wider than what the SMMU supports.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following gives
	 * a rough idea of the maximum number of processes that can be bound.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

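/*
 * I/O page faults require stall support, and since the driver doesn't track
 * stream IDs in fault events, only masters with a single stream ID can use
 * them.
 */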
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF
	 * first. Others have device-specific fault handlers and don't need
	 * IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

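/*
 * Enable SVA for a master: set up I/O page fault handling first when the
 * device relies on it, then mark the master as SVA-enabled under sva_lock.
 */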
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * The notifier structs are freed by arm_smmu_mmu_notifier_free() via
	 * RCU; wait for any pending callbacks to finish before the driver
	 * module can be unloaded.
	 */
	mmu_notifier_synchronize();
}