0001
0002
0003
0004
0005
0006
0007
0008 #define pr_fmt(fmt) "AMD-Vi: " fmt
0009 #define dev_fmt(fmt) pr_fmt(fmt)
0010
0011 #include <linux/ratelimit.h>
0012 #include <linux/pci.h>
0013 #include <linux/acpi.h>
0014 #include <linux/amba/bus.h>
0015 #include <linux/platform_device.h>
0016 #include <linux/pci-ats.h>
0017 #include <linux/bitmap.h>
0018 #include <linux/slab.h>
0019 #include <linux/debugfs.h>
0020 #include <linux/scatterlist.h>
0021 #include <linux/dma-map-ops.h>
0022 #include <linux/dma-direct.h>
0023 #include <linux/dma-iommu.h>
0024 #include <linux/iommu-helper.h>
0025 #include <linux/delay.h>
0026 #include <linux/amd-iommu.h>
0027 #include <linux/notifier.h>
0028 #include <linux/export.h>
0029 #include <linux/irq.h>
0030 #include <linux/msi.h>
0031 #include <linux/irqdomain.h>
0032 #include <linux/percpu.h>
0033 #include <linux/io-pgtable.h>
0034 #include <linux/cc_platform.h>
0035 #include <asm/irq_remapping.h>
0036 #include <asm/io_apic.h>
0037 #include <asm/apic.h>
0038 #include <asm/hw_irq.h>
0039 #include <asm/proto.h>
0040 #include <asm/iommu.h>
0041 #include <asm/gart.h>
0042 #include <asm/dma.h>
0043
0044 #include "amd_iommu.h"
0045 #include "../irq_remapping.h"
0046
0047 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
0048
0049 #define LOOP_TIMEOUT 100000
0050
0051
0052 #define IOVA_START_PFN (1)
0053 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
0054
0055
0056 #define MSI_RANGE_START (0xfee00000)
0057 #define MSI_RANGE_END (0xfeefffff)
0058 #define HT_RANGE_START (0xfd00000000ULL)
0059 #define HT_RANGE_END (0xffffffffffULL)
0060
0061 #define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
0062
0063 static DEFINE_SPINLOCK(pd_bitmap_lock);
0064
0065 LIST_HEAD(ioapic_map);
0066 LIST_HEAD(hpet_map);
0067 LIST_HEAD(acpihid_map);
0068
0069
0070
0071
0072
0073 const struct iommu_ops amd_iommu_ops;
0074
0075 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
0076 int amd_iommu_max_glx_val = -1;
0077
0078
0079
0080
0081 struct iommu_cmd {
0082 u32 data[4];
0083 };
0084
0085 struct kmem_cache *amd_iommu_irq_cache;
0086
0087 static void detach_device(struct device *dev);
0088
0089
0090
0091
0092
0093
0094
0095 static inline int get_acpihid_device_id(struct device *dev,
0096 struct acpihid_map_entry **entry)
0097 {
0098 struct acpi_device *adev = ACPI_COMPANION(dev);
0099 struct acpihid_map_entry *p;
0100
0101 if (!adev)
0102 return -ENODEV;
0103
0104 list_for_each_entry(p, &acpihid_map, list) {
0105 if (acpi_dev_hid_uid_match(adev, p->hid,
0106 p->uid[0] ? p->uid : NULL)) {
0107 if (entry)
0108 *entry = p;
0109 return p->devid;
0110 }
0111 }
0112 return -EINVAL;
0113 }
0114
0115 static inline int get_device_sbdf_id(struct device *dev)
0116 {
0117 int sbdf;
0118
0119 if (dev_is_pci(dev))
0120 sbdf = get_pci_sbdf_id(to_pci_dev(dev));
0121 else
0122 sbdf = get_acpihid_device_id(dev, NULL);
0123
0124 return sbdf;
0125 }
0126
0127 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
0128 {
0129 struct dev_table_entry *dev_table;
0130 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0131
0132 BUG_ON(pci_seg == NULL);
0133 dev_table = pci_seg->dev_table;
0134 BUG_ON(dev_table == NULL);
0135
0136 return dev_table;
0137 }
0138
0139 static inline u16 get_device_segment(struct device *dev)
0140 {
0141 u16 seg;
0142
0143 if (dev_is_pci(dev)) {
0144 struct pci_dev *pdev = to_pci_dev(dev);
0145
0146 seg = pci_domain_nr(pdev->bus);
0147 } else {
0148 u32 devid = get_acpihid_device_id(dev, NULL);
0149
0150 seg = PCI_SBDF_TO_SEGID(devid);
0151 }
0152
0153 return seg;
0154 }
0155
0156
0157 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
0158 {
0159 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0160
0161 pci_seg->rlookup_table[devid] = iommu;
0162 }
0163
0164 static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
0165 {
0166 struct amd_iommu_pci_seg *pci_seg;
0167
0168 for_each_pci_segment(pci_seg) {
0169 if (pci_seg->id == seg)
0170 return pci_seg->rlookup_table[devid];
0171 }
0172 return NULL;
0173 }
0174
0175 static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
0176 {
0177 u16 seg = get_device_segment(dev);
0178 int devid = get_device_sbdf_id(dev);
0179
0180 if (devid < 0)
0181 return NULL;
0182 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
0183 }
0184
0185 static struct protection_domain *to_pdomain(struct iommu_domain *dom)
0186 {
0187 return container_of(dom, struct protection_domain, domain);
0188 }
0189
0190 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
0191 {
0192 struct iommu_dev_data *dev_data;
0193 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0194
0195 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
0196 if (!dev_data)
0197 return NULL;
0198
0199 spin_lock_init(&dev_data->lock);
0200 dev_data->devid = devid;
0201 ratelimit_default_init(&dev_data->rs);
0202
0203 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
0204 return dev_data;
0205 }
0206
0207 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
0208 {
0209 struct iommu_dev_data *dev_data;
0210 struct llist_node *node;
0211 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0212
0213 if (llist_empty(&pci_seg->dev_data_list))
0214 return NULL;
0215
0216 node = pci_seg->dev_data_list.first;
0217 llist_for_each_entry(dev_data, node, dev_data_list) {
0218 if (dev_data->devid == devid)
0219 return dev_data;
0220 }
0221
0222 return NULL;
0223 }
0224
0225 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
0226 {
0227 struct amd_iommu *iommu;
0228 struct dev_table_entry *dev_table;
0229 u16 devid = pci_dev_id(pdev);
0230
0231 if (devid == alias)
0232 return 0;
0233
0234 iommu = rlookup_amd_iommu(&pdev->dev);
0235 if (!iommu)
0236 return 0;
0237
0238 amd_iommu_set_rlookup_table(iommu, alias);
0239 dev_table = get_dev_table(iommu);
0240 memcpy(dev_table[alias].data,
0241 dev_table[devid].data,
0242 sizeof(dev_table[alias].data));
0243
0244 return 0;
0245 }
0246
0247 static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
0248 {
0249 struct pci_dev *pdev;
0250
0251 if (!dev_is_pci(dev))
0252 return;
0253 pdev = to_pci_dev(dev);
0254
0255
0256
0257
0258
0259
0260 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
0261
0262 pci_for_each_dma_alias(pdev, clone_alias, NULL);
0263 }
0264
0265 static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
0266 {
0267 struct pci_dev *pdev = to_pci_dev(dev);
0268 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0269 u16 ivrs_alias;
0270
0271
0272 if (!dev_is_pci(dev))
0273 return;
0274
0275
0276
0277
0278
0279 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
0280 if (ivrs_alias != pci_dev_id(pdev) &&
0281 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
0282 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
0283
0284 clone_aliases(iommu, dev);
0285 }
0286
0287 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
0288 {
0289 struct iommu_dev_data *dev_data;
0290
0291 dev_data = search_dev_data(iommu, devid);
0292
0293 if (dev_data == NULL) {
0294 dev_data = alloc_dev_data(iommu, devid);
0295 if (!dev_data)
0296 return NULL;
0297
0298 if (translation_pre_enabled(iommu))
0299 dev_data->defer_attach = true;
0300 }
0301
0302 return dev_data;
0303 }
0304
0305
0306
0307
0308 static struct iommu_group *acpihid_device_group(struct device *dev)
0309 {
0310 struct acpihid_map_entry *p, *entry = NULL;
0311 int devid;
0312
0313 devid = get_acpihid_device_id(dev, &entry);
0314 if (devid < 0)
0315 return ERR_PTR(devid);
0316
0317 list_for_each_entry(p, &acpihid_map, list) {
0318 if ((devid == p->devid) && p->group)
0319 entry->group = p->group;
0320 }
0321
0322 if (!entry->group)
0323 entry->group = generic_device_group(dev);
0324 else
0325 iommu_group_ref_get(entry->group);
0326
0327 return entry->group;
0328 }
0329
0330 static bool pci_iommuv2_capable(struct pci_dev *pdev)
0331 {
0332 static const int caps[] = {
0333 PCI_EXT_CAP_ID_PRI,
0334 PCI_EXT_CAP_ID_PASID,
0335 };
0336 int i, pos;
0337
0338 if (!pci_ats_supported(pdev))
0339 return false;
0340
0341 for (i = 0; i < 2; ++i) {
0342 pos = pci_find_ext_capability(pdev, caps[i]);
0343 if (pos == 0)
0344 return false;
0345 }
0346
0347 return true;
0348 }
0349
0350
0351
0352
0353
0354 static bool check_device(struct device *dev)
0355 {
0356 struct amd_iommu_pci_seg *pci_seg;
0357 struct amd_iommu *iommu;
0358 int devid, sbdf;
0359
0360 if (!dev)
0361 return false;
0362
0363 sbdf = get_device_sbdf_id(dev);
0364 if (sbdf < 0)
0365 return false;
0366 devid = PCI_SBDF_TO_DEVID(sbdf);
0367
0368 iommu = rlookup_amd_iommu(dev);
0369 if (!iommu)
0370 return false;
0371
0372
0373 pci_seg = iommu->pci_seg;
0374 if (devid > pci_seg->last_bdf)
0375 return false;
0376
0377 return true;
0378 }
0379
0380 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
0381 {
0382 struct iommu_dev_data *dev_data;
0383 int devid, sbdf;
0384
0385 if (dev_iommu_priv_get(dev))
0386 return 0;
0387
0388 sbdf = get_device_sbdf_id(dev);
0389 if (sbdf < 0)
0390 return sbdf;
0391
0392 devid = PCI_SBDF_TO_DEVID(sbdf);
0393 dev_data = find_dev_data(iommu, devid);
0394 if (!dev_data)
0395 return -ENOMEM;
0396
0397 dev_data->dev = dev;
0398 setup_aliases(iommu, dev);
0399
0400
0401
0402
0403
0404
0405
0406 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
0407 dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
0408 dev_data->iommu_v2 = iommu->is_iommu_v2;
0409 }
0410
0411 dev_iommu_priv_set(dev, dev_data);
0412
0413 return 0;
0414 }
0415
0416 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
0417 {
0418 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
0419 struct dev_table_entry *dev_table = get_dev_table(iommu);
0420 int devid, sbdf;
0421
0422 sbdf = get_device_sbdf_id(dev);
0423 if (sbdf < 0)
0424 return;
0425
0426 devid = PCI_SBDF_TO_DEVID(sbdf);
0427 pci_seg->rlookup_table[devid] = NULL;
0428 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
0429
0430 setup_aliases(iommu, dev);
0431 }
0432
0433 static void amd_iommu_uninit_device(struct device *dev)
0434 {
0435 struct iommu_dev_data *dev_data;
0436
0437 dev_data = dev_iommu_priv_get(dev);
0438 if (!dev_data)
0439 return;
0440
0441 if (dev_data->domain)
0442 detach_device(dev);
0443
0444 dev_iommu_priv_set(dev, NULL);
0445
0446
0447
0448
0449
0450 }
0451
0452
0453
0454
0455
0456
0457
0458 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
0459 {
0460 int i;
0461 struct dev_table_entry *dev_table = get_dev_table(iommu);
0462
0463 for (i = 0; i < 4; ++i)
0464 pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
0465 }
0466
0467 static void dump_command(unsigned long phys_addr)
0468 {
0469 struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
0470 int i;
0471
0472 for (i = 0; i < 4; ++i)
0473 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
0474 }
0475
0476 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
0477 {
0478 struct iommu_dev_data *dev_data = NULL;
0479 int devid, vmg_tag, flags;
0480 struct pci_dev *pdev;
0481 u64 spa;
0482
0483 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
0484 vmg_tag = (event[1]) & 0xFFFF;
0485 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
0486 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
0487
0488 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
0489 devid & 0xff);
0490 if (pdev)
0491 dev_data = dev_iommu_priv_get(&pdev->dev);
0492
0493 if (dev_data) {
0494 if (__ratelimit(&dev_data->rs)) {
0495 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
0496 vmg_tag, spa, flags);
0497 }
0498 } else {
0499 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
0500 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0501 vmg_tag, spa, flags);
0502 }
0503
0504 if (pdev)
0505 pci_dev_put(pdev);
0506 }
0507
0508 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
0509 {
0510 struct iommu_dev_data *dev_data = NULL;
0511 int devid, flags_rmp, vmg_tag, flags;
0512 struct pci_dev *pdev;
0513 u64 gpa;
0514
0515 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
0516 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
0517 vmg_tag = (event[1]) & 0xFFFF;
0518 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
0519 gpa = ((u64)event[3] << 32) | event[2];
0520
0521 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
0522 devid & 0xff);
0523 if (pdev)
0524 dev_data = dev_iommu_priv_get(&pdev->dev);
0525
0526 if (dev_data) {
0527 if (__ratelimit(&dev_data->rs)) {
0528 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
0529 vmg_tag, gpa, flags_rmp, flags);
0530 }
0531 } else {
0532 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
0533 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0534 vmg_tag, gpa, flags_rmp, flags);
0535 }
0536
0537 if (pdev)
0538 pci_dev_put(pdev);
0539 }
0540
0541 #define IS_IOMMU_MEM_TRANSACTION(flags) \
0542 (((flags) & EVENT_FLAG_I) == 0)
0543
0544 #define IS_WRITE_REQUEST(flags) \
0545 ((flags) & EVENT_FLAG_RW)
0546
0547 static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
0548 u16 devid, u16 domain_id,
0549 u64 address, int flags)
0550 {
0551 struct iommu_dev_data *dev_data = NULL;
0552 struct pci_dev *pdev;
0553
0554 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
0555 devid & 0xff);
0556 if (pdev)
0557 dev_data = dev_iommu_priv_get(&pdev->dev);
0558
0559 if (dev_data) {
0560
0561
0562
0563
0564
0565 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
0566 if (!report_iommu_fault(&dev_data->domain->domain,
0567 &pdev->dev, address,
0568 IS_WRITE_REQUEST(flags) ?
0569 IOMMU_FAULT_WRITE :
0570 IOMMU_FAULT_READ))
0571 goto out;
0572 }
0573
0574 if (__ratelimit(&dev_data->rs)) {
0575 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
0576 domain_id, address, flags);
0577 }
0578 } else {
0579 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
0580 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0581 domain_id, address, flags);
0582 }
0583
0584 out:
0585 if (pdev)
0586 pci_dev_put(pdev);
0587 }
0588
0589 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
0590 {
0591 struct device *dev = iommu->iommu.dev;
0592 int type, devid, flags, tag;
0593 volatile u32 *event = __evt;
0594 int count = 0;
0595 u64 address;
0596 u32 pasid;
0597
0598 retry:
0599 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
0600 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
0601 pasid = (event[0] & EVENT_DOMID_MASK_HI) |
0602 (event[1] & EVENT_DOMID_MASK_LO);
0603 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
0604 address = (u64)(((u64)event[3]) << 32) | event[2];
0605
0606 if (type == 0) {
0607
0608 if (++count == LOOP_TIMEOUT) {
0609 pr_err("No event written to event log\n");
0610 return;
0611 }
0612 udelay(1);
0613 goto retry;
0614 }
0615
0616 if (type == EVENT_TYPE_IO_FAULT) {
0617 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
0618 return;
0619 }
0620
0621 switch (type) {
0622 case EVENT_TYPE_ILL_DEV:
0623 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
0624 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0625 pasid, address, flags);
0626 dump_dte_entry(iommu, devid);
0627 break;
0628 case EVENT_TYPE_DEV_TAB_ERR:
0629 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
0630 "address=0x%llx flags=0x%04x]\n",
0631 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0632 address, flags);
0633 break;
0634 case EVENT_TYPE_PAGE_TAB_ERR:
0635 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
0636 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0637 pasid, address, flags);
0638 break;
0639 case EVENT_TYPE_ILL_CMD:
0640 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
0641 dump_command(address);
0642 break;
0643 case EVENT_TYPE_CMD_HARD_ERR:
0644 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
0645 address, flags);
0646 break;
0647 case EVENT_TYPE_IOTLB_INV_TO:
0648 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
0649 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0650 address);
0651 break;
0652 case EVENT_TYPE_INV_DEV_REQ:
0653 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
0654 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0655 pasid, address, flags);
0656 break;
0657 case EVENT_TYPE_RMP_FAULT:
0658 amd_iommu_report_rmp_fault(iommu, event);
0659 break;
0660 case EVENT_TYPE_RMP_HW_ERR:
0661 amd_iommu_report_rmp_hw_error(iommu, event);
0662 break;
0663 case EVENT_TYPE_INV_PPR_REQ:
0664 pasid = PPR_PASID(*((u64 *)__evt));
0665 tag = event[1] & 0x03FF;
0666 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
0667 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
0668 pasid, address, flags, tag);
0669 break;
0670 default:
0671 dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
0672 event[0], event[1], event[2], event[3]);
0673 }
0674
0675 memset(__evt, 0, 4 * sizeof(u32));
0676 }
0677
0678 static void iommu_poll_events(struct amd_iommu *iommu)
0679 {
0680 u32 head, tail;
0681
0682 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
0683 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
0684
0685 while (head != tail) {
0686 iommu_print_event(iommu, iommu->evt_buf + head);
0687 head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
0688 }
0689
0690 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
0691 }
0692
0693 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
0694 {
0695 struct amd_iommu_fault fault;
0696
0697 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
0698 pr_err_ratelimited("Unknown PPR request received\n");
0699 return;
0700 }
0701
0702 fault.address = raw[1];
0703 fault.pasid = PPR_PASID(raw[0]);
0704 fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
0705 fault.tag = PPR_TAG(raw[0]);
0706 fault.flags = PPR_FLAGS(raw[0]);
0707
0708 atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
0709 }
0710
0711 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
0712 {
0713 u32 head, tail;
0714
0715 if (iommu->ppr_log == NULL)
0716 return;
0717
0718 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
0719 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
0720
0721 while (head != tail) {
0722 volatile u64 *raw;
0723 u64 entry[2];
0724 int i;
0725
0726 raw = (u64 *)(iommu->ppr_log + head);
0727
0728
0729
0730
0731
0732
0733 for (i = 0; i < LOOP_TIMEOUT; ++i) {
0734 if (PPR_REQ_TYPE(raw[0]) != 0)
0735 break;
0736 udelay(1);
0737 }
0738
0739
0740 entry[0] = raw[0];
0741 entry[1] = raw[1];
0742
0743
0744
0745
0746
0747 raw[0] = raw[1] = 0UL;
0748
0749
0750 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
0751 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
0752
0753
0754 iommu_handle_ppr_entry(iommu, entry);
0755
0756
0757 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
0758 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
0759 }
0760 }
0761
0762 #ifdef CONFIG_IRQ_REMAP
0763 static int (*iommu_ga_log_notifier)(u32);
0764
0765 int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
0766 {
0767 iommu_ga_log_notifier = notifier;
0768
0769 return 0;
0770 }
0771 EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
0772
0773 static void iommu_poll_ga_log(struct amd_iommu *iommu)
0774 {
0775 u32 head, tail, cnt = 0;
0776
0777 if (iommu->ga_log == NULL)
0778 return;
0779
0780 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
0781 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
0782
0783 while (head != tail) {
0784 volatile u64 *raw;
0785 u64 log_entry;
0786
0787 raw = (u64 *)(iommu->ga_log + head);
0788 cnt++;
0789
0790
0791 log_entry = *raw;
0792
0793
0794 head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
0795 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
0796
0797
0798 switch (GA_REQ_TYPE(log_entry)) {
0799 case GA_GUEST_NR:
0800 if (!iommu_ga_log_notifier)
0801 break;
0802
0803 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
0804 __func__, GA_DEVID(log_entry),
0805 GA_TAG(log_entry));
0806
0807 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
0808 pr_err("GA log notifier failed.\n");
0809 break;
0810 default:
0811 break;
0812 }
0813 }
0814 }
0815
0816 static void
0817 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
0818 {
0819 if (!irq_remapping_enabled || !dev_is_pci(dev) ||
0820 pci_dev_has_special_msi_domain(to_pci_dev(dev)))
0821 return;
0822
0823 dev_set_msi_domain(dev, iommu->msi_domain);
0824 }
0825
0826 #else
0827 static inline void
0828 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
0829 #endif
0830
0831 #define AMD_IOMMU_INT_MASK \
0832 (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
0833 MMIO_STATUS_EVT_INT_MASK | \
0834 MMIO_STATUS_PPR_INT_MASK | \
0835 MMIO_STATUS_GALOG_INT_MASK)
0836
0837 irqreturn_t amd_iommu_int_thread(int irq, void *data)
0838 {
0839 struct amd_iommu *iommu = (struct amd_iommu *) data;
0840 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
0841
0842 while (status & AMD_IOMMU_INT_MASK) {
0843
0844 writel(AMD_IOMMU_INT_MASK,
0845 iommu->mmio_base + MMIO_STATUS_OFFSET);
0846
0847 if (status & MMIO_STATUS_EVT_INT_MASK) {
0848 pr_devel("Processing IOMMU Event Log\n");
0849 iommu_poll_events(iommu);
0850 }
0851
0852 if (status & MMIO_STATUS_PPR_INT_MASK) {
0853 pr_devel("Processing IOMMU PPR Log\n");
0854 iommu_poll_ppr_log(iommu);
0855 }
0856
0857 #ifdef CONFIG_IRQ_REMAP
0858 if (status & MMIO_STATUS_GALOG_INT_MASK) {
0859 pr_devel("Processing IOMMU GA Log\n");
0860 iommu_poll_ga_log(iommu);
0861 }
0862 #endif
0863
0864 if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
0865 pr_info_ratelimited("IOMMU event log overflow\n");
0866 amd_iommu_restart_event_logging(iommu);
0867 }
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
0883 }
0884 return IRQ_HANDLED;
0885 }
0886
0887 irqreturn_t amd_iommu_int_handler(int irq, void *data)
0888 {
0889 return IRQ_WAKE_THREAD;
0890 }
0891
0892
0893
0894
0895
0896
0897
0898 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
0899 {
0900 int i = 0;
0901
0902 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
0903 udelay(1);
0904 i += 1;
0905 }
0906
0907 if (i == LOOP_TIMEOUT) {
0908 pr_alert("Completion-Wait loop timed out\n");
0909 return -EIO;
0910 }
0911
0912 return 0;
0913 }
0914
0915 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
0916 struct iommu_cmd *cmd)
0917 {
0918 u8 *target;
0919 u32 tail;
0920
0921
0922 tail = iommu->cmd_buf_tail;
0923 target = iommu->cmd_buf + tail;
0924 memcpy(target, cmd, sizeof(*cmd));
0925
0926 tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
0927 iommu->cmd_buf_tail = tail;
0928
0929
0930 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
0931 }
0932
0933 static void build_completion_wait(struct iommu_cmd *cmd,
0934 struct amd_iommu *iommu,
0935 u64 data)
0936 {
0937 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
0938
0939 memset(cmd, 0, sizeof(*cmd));
0940 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
0941 cmd->data[1] = upper_32_bits(paddr);
0942 cmd->data[2] = lower_32_bits(data);
0943 cmd->data[3] = upper_32_bits(data);
0944 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
0945 }
0946
0947 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
0948 {
0949 memset(cmd, 0, sizeof(*cmd));
0950 cmd->data[0] = devid;
0951 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
0952 }
0953
0954
0955
0956
0957
0958 static inline u64 build_inv_address(u64 address, size_t size)
0959 {
0960 u64 pages, end, msb_diff;
0961
0962 pages = iommu_num_pages(address, size, PAGE_SIZE);
0963
0964 if (pages == 1)
0965 return address & PAGE_MASK;
0966
0967 end = address + size - 1;
0968
0969
0970
0971
0972
0973 msb_diff = fls64(end ^ address) - 1;
0974
0975
0976
0977
0978
0979 if (unlikely(msb_diff > 51)) {
0980 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
0981 } else {
0982
0983
0984
0985
0986 address |= (1ull << msb_diff) - 1;
0987 }
0988
0989
0990 address &= PAGE_MASK;
0991
0992
0993 return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
0994 }
0995
0996 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
0997 size_t size, u16 domid, int pde)
0998 {
0999 u64 inv_address = build_inv_address(address, size);
1000
1001 memset(cmd, 0, sizeof(*cmd));
1002 cmd->data[1] |= domid;
1003 cmd->data[2] = lower_32_bits(inv_address);
1004 cmd->data[3] = upper_32_bits(inv_address);
1005 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1006 if (pde)
1007 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1008 }
1009
1010 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
1011 u64 address, size_t size)
1012 {
1013 u64 inv_address = build_inv_address(address, size);
1014
1015 memset(cmd, 0, sizeof(*cmd));
1016 cmd->data[0] = devid;
1017 cmd->data[0] |= (qdep & 0xff) << 24;
1018 cmd->data[1] = devid;
1019 cmd->data[2] = lower_32_bits(inv_address);
1020 cmd->data[3] = upper_32_bits(inv_address);
1021 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1022 }
1023
1024 static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
1025 u64 address, bool size)
1026 {
1027 memset(cmd, 0, sizeof(*cmd));
1028
1029 address &= ~(0xfffULL);
1030
1031 cmd->data[0] = pasid;
1032 cmd->data[1] = domid;
1033 cmd->data[2] = lower_32_bits(address);
1034 cmd->data[3] = upper_32_bits(address);
1035 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1036 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1037 if (size)
1038 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
1039 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1040 }
1041
1042 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1043 int qdep, u64 address, bool size)
1044 {
1045 memset(cmd, 0, sizeof(*cmd));
1046
1047 address &= ~(0xfffULL);
1048
1049 cmd->data[0] = devid;
1050 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
1051 cmd->data[0] |= (qdep & 0xff) << 24;
1052 cmd->data[1] = devid;
1053 cmd->data[1] |= (pasid & 0xff) << 16;
1054 cmd->data[2] = lower_32_bits(address);
1055 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1056 cmd->data[3] = upper_32_bits(address);
1057 if (size)
1058 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
1059 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1060 }
1061
1062 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1063 int status, int tag, bool gn)
1064 {
1065 memset(cmd, 0, sizeof(*cmd));
1066
1067 cmd->data[0] = devid;
1068 if (gn) {
1069 cmd->data[1] = pasid;
1070 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
1071 }
1072 cmd->data[3] = tag & 0x1ff;
1073 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1074
1075 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1076 }
1077
1078 static void build_inv_all(struct iommu_cmd *cmd)
1079 {
1080 memset(cmd, 0, sizeof(*cmd));
1081 CMD_SET_TYPE(cmd, CMD_INV_ALL);
1082 }
1083
1084 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1085 {
1086 memset(cmd, 0, sizeof(*cmd));
1087 cmd->data[0] = devid;
1088 CMD_SET_TYPE(cmd, CMD_INV_IRT);
1089 }
1090
1091
1092
1093
1094
1095 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1096 struct iommu_cmd *cmd,
1097 bool sync)
1098 {
1099 unsigned int count = 0;
1100 u32 left, next_tail;
1101
1102 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1103 again:
1104 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1105
1106 if (left <= 0x20) {
1107
1108 if (count++) {
1109 if (count == LOOP_TIMEOUT) {
1110 pr_err("Command buffer timeout\n");
1111 return -EIO;
1112 }
1113
1114 udelay(1);
1115 }
1116
1117
1118 iommu->cmd_buf_head = readl(iommu->mmio_base +
1119 MMIO_CMD_HEAD_OFFSET);
1120
1121 goto again;
1122 }
1123
1124 copy_cmd_to_buffer(iommu, cmd);
1125
1126
1127 iommu->need_sync = sync;
1128
1129 return 0;
1130 }
1131
1132 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1133 struct iommu_cmd *cmd,
1134 bool sync)
1135 {
1136 unsigned long flags;
1137 int ret;
1138
1139 raw_spin_lock_irqsave(&iommu->lock, flags);
1140 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1141 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1142
1143 return ret;
1144 }
1145
1146 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1147 {
1148 return iommu_queue_command_sync(iommu, cmd, true);
1149 }
1150
1151
1152
1153
1154
1155 static int iommu_completion_wait(struct amd_iommu *iommu)
1156 {
1157 struct iommu_cmd cmd;
1158 unsigned long flags;
1159 int ret;
1160 u64 data;
1161
1162 if (!iommu->need_sync)
1163 return 0;
1164
1165 raw_spin_lock_irqsave(&iommu->lock, flags);
1166
1167 data = ++iommu->cmd_sem_val;
1168 build_completion_wait(&cmd, iommu, data);
1169
1170 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1171 if (ret)
1172 goto out_unlock;
1173
1174 ret = wait_on_sem(iommu, data);
1175
1176 out_unlock:
1177 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1178
1179 return ret;
1180 }
1181
1182 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1183 {
1184 struct iommu_cmd cmd;
1185
1186 build_inv_dte(&cmd, devid);
1187
1188 return iommu_queue_command(iommu, &cmd);
1189 }
1190
1191 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1192 {
1193 u32 devid;
1194 u16 last_bdf = iommu->pci_seg->last_bdf;
1195
1196 for (devid = 0; devid <= last_bdf; ++devid)
1197 iommu_flush_dte(iommu, devid);
1198
1199 iommu_completion_wait(iommu);
1200 }
1201
1202
1203
1204
1205
1206 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1207 {
1208 u32 dom_id;
1209 u16 last_bdf = iommu->pci_seg->last_bdf;
1210
1211 for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
1212 struct iommu_cmd cmd;
1213 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1214 dom_id, 1);
1215 iommu_queue_command(iommu, &cmd);
1216 }
1217
1218 iommu_completion_wait(iommu);
1219 }
1220
1221 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1222 {
1223 struct iommu_cmd cmd;
1224
1225 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1226 dom_id, 1);
1227 iommu_queue_command(iommu, &cmd);
1228
1229 iommu_completion_wait(iommu);
1230 }
1231
1232 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1233 {
1234 struct iommu_cmd cmd;
1235
1236 build_inv_all(&cmd);
1237
1238 iommu_queue_command(iommu, &cmd);
1239 iommu_completion_wait(iommu);
1240 }
1241
1242 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1243 {
1244 struct iommu_cmd cmd;
1245
1246 build_inv_irt(&cmd, devid);
1247
1248 iommu_queue_command(iommu, &cmd);
1249 }
1250
1251 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1252 {
1253 u32 devid;
1254 u16 last_bdf = iommu->pci_seg->last_bdf;
1255
1256 for (devid = 0; devid <= last_bdf; devid++)
1257 iommu_flush_irt(iommu, devid);
1258
1259 iommu_completion_wait(iommu);
1260 }
1261
1262 void iommu_flush_all_caches(struct amd_iommu *iommu)
1263 {
1264 if (iommu_feature(iommu, FEATURE_IA)) {
1265 amd_iommu_flush_all(iommu);
1266 } else {
1267 amd_iommu_flush_dte_all(iommu);
1268 amd_iommu_flush_irt_all(iommu);
1269 amd_iommu_flush_tlb_all(iommu);
1270 }
1271 }
1272
1273
1274
1275
1276 static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1277 u64 address, size_t size)
1278 {
1279 struct amd_iommu *iommu;
1280 struct iommu_cmd cmd;
1281 int qdep;
1282
1283 qdep = dev_data->ats.qdep;
1284 iommu = rlookup_amd_iommu(dev_data->dev);
1285 if (!iommu)
1286 return -EINVAL;
1287
1288 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1289
1290 return iommu_queue_command(iommu, &cmd);
1291 }
1292
1293 static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1294 {
1295 struct amd_iommu *iommu = data;
1296
1297 return iommu_flush_dte(iommu, alias);
1298 }
1299
1300
1301
1302
1303 static int device_flush_dte(struct iommu_dev_data *dev_data)
1304 {
1305 struct amd_iommu *iommu;
1306 struct pci_dev *pdev = NULL;
1307 struct amd_iommu_pci_seg *pci_seg;
1308 u16 alias;
1309 int ret;
1310
1311 iommu = rlookup_amd_iommu(dev_data->dev);
1312 if (!iommu)
1313 return -EINVAL;
1314
1315 if (dev_is_pci(dev_data->dev))
1316 pdev = to_pci_dev(dev_data->dev);
1317
1318 if (pdev)
1319 ret = pci_for_each_dma_alias(pdev,
1320 device_flush_dte_alias, iommu);
1321 else
1322 ret = iommu_flush_dte(iommu, dev_data->devid);
1323 if (ret)
1324 return ret;
1325
1326 pci_seg = iommu->pci_seg;
1327 alias = pci_seg->alias_table[dev_data->devid];
1328 if (alias != dev_data->devid) {
1329 ret = iommu_flush_dte(iommu, alias);
1330 if (ret)
1331 return ret;
1332 }
1333
1334 if (dev_data->ats.enabled)
1335 ret = device_flush_iotlb(dev_data, 0, ~0UL);
1336
1337 return ret;
1338 }
1339
1340
1341
1342
1343
1344
1345 static void __domain_flush_pages(struct protection_domain *domain,
1346 u64 address, size_t size, int pde)
1347 {
1348 struct iommu_dev_data *dev_data;
1349 struct iommu_cmd cmd;
1350 int ret = 0, i;
1351
1352 build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1353
1354 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1355 if (!domain->dev_iommu[i])
1356 continue;
1357
1358
1359
1360
1361
1362 ret |= iommu_queue_command(amd_iommus[i], &cmd);
1363 }
1364
1365 list_for_each_entry(dev_data, &domain->dev_list, list) {
1366
1367 if (!dev_data->ats.enabled)
1368 continue;
1369
1370 ret |= device_flush_iotlb(dev_data, address, size);
1371 }
1372
1373 WARN_ON(ret);
1374 }
1375
1376 static void domain_flush_pages(struct protection_domain *domain,
1377 u64 address, size_t size, int pde)
1378 {
1379 if (likely(!amd_iommu_np_cache)) {
1380 __domain_flush_pages(domain, address, size, pde);
1381 return;
1382 }
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394 while (size != 0) {
1395 int addr_alignment = __ffs(address);
1396 int size_alignment = __fls(size);
1397 int min_alignment;
1398 size_t flush_size;
1399
1400
1401
1402
1403
1404
1405
1406 if (likely((unsigned long)address != 0))
1407 min_alignment = min(addr_alignment, size_alignment);
1408 else
1409 min_alignment = size_alignment;
1410
1411 flush_size = 1ul << min_alignment;
1412
1413 __domain_flush_pages(domain, address, flush_size, pde);
1414 address += flush_size;
1415 size -= flush_size;
1416 }
1417 }
1418
1419
1420 void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
1421 {
1422 domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1423 }
1424
1425 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
1426 {
1427 int i;
1428
1429 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
1430 if (domain && !domain->dev_iommu[i])
1431 continue;
1432
1433
1434
1435
1436
1437 iommu_completion_wait(amd_iommus[i]);
1438 }
1439 }
1440
1441
1442 static void domain_flush_np_cache(struct protection_domain *domain,
1443 dma_addr_t iova, size_t size)
1444 {
1445 if (unlikely(amd_iommu_np_cache)) {
1446 unsigned long flags;
1447
1448 spin_lock_irqsave(&domain->lock, flags);
1449 domain_flush_pages(domain, iova, size, 1);
1450 amd_iommu_domain_flush_complete(domain);
1451 spin_unlock_irqrestore(&domain->lock, flags);
1452 }
1453 }
1454
1455
1456
1457
1458
1459 static void domain_flush_devices(struct protection_domain *domain)
1460 {
1461 struct iommu_dev_data *dev_data;
1462
1463 list_for_each_entry(dev_data, &domain->dev_list, list)
1464 device_flush_dte(dev_data);
1465 }
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477 static u16 domain_id_alloc(void)
1478 {
1479 int id;
1480
1481 spin_lock(&pd_bitmap_lock);
1482 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1483 BUG_ON(id == 0);
1484 if (id > 0 && id < MAX_DOMAIN_ID)
1485 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1486 else
1487 id = 0;
1488 spin_unlock(&pd_bitmap_lock);
1489
1490 return id;
1491 }
1492
1493 static void domain_id_free(int id)
1494 {
1495 spin_lock(&pd_bitmap_lock);
1496 if (id > 0 && id < MAX_DOMAIN_ID)
1497 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1498 spin_unlock(&pd_bitmap_lock);
1499 }
1500
1501 static void free_gcr3_tbl_level1(u64 *tbl)
1502 {
1503 u64 *ptr;
1504 int i;
1505
1506 for (i = 0; i < 512; ++i) {
1507 if (!(tbl[i] & GCR3_VALID))
1508 continue;
1509
1510 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1511
1512 free_page((unsigned long)ptr);
1513 }
1514 }
1515
1516 static void free_gcr3_tbl_level2(u64 *tbl)
1517 {
1518 u64 *ptr;
1519 int i;
1520
1521 for (i = 0; i < 512; ++i) {
1522 if (!(tbl[i] & GCR3_VALID))
1523 continue;
1524
1525 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1526
1527 free_gcr3_tbl_level1(ptr);
1528 }
1529 }
1530
1531 static void free_gcr3_table(struct protection_domain *domain)
1532 {
1533 if (domain->glx == 2)
1534 free_gcr3_tbl_level2(domain->gcr3_tbl);
1535 else if (domain->glx == 1)
1536 free_gcr3_tbl_level1(domain->gcr3_tbl);
1537 else
1538 BUG_ON(domain->glx != 0);
1539
1540 free_page((unsigned long)domain->gcr3_tbl);
1541 }
1542
1543 static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
1544 struct protection_domain *domain, bool ats, bool ppr)
1545 {
1546 u64 pte_root = 0;
1547 u64 flags = 0;
1548 u32 old_domid;
1549 struct dev_table_entry *dev_table = get_dev_table(iommu);
1550
1551 if (domain->iop.mode != PAGE_MODE_NONE)
1552 pte_root = iommu_virt_to_phys(domain->iop.root);
1553
1554 pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
1555 << DEV_ENTRY_MODE_SHIFT;
1556
1557 pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
1558
1559
1560
1561
1562
1563 if (!amd_iommu_snp_en || (domain->id != 0))
1564 pte_root |= DTE_FLAG_TV;
1565
1566 flags = dev_table[devid].data[1];
1567
1568 if (ats)
1569 flags |= DTE_FLAG_IOTLB;
1570
1571 if (ppr) {
1572 if (iommu_feature(iommu, FEATURE_EPHSUP))
1573 pte_root |= 1ULL << DEV_ENTRY_PPR;
1574 }
1575
1576 if (domain->flags & PD_IOMMUV2_MASK) {
1577 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
1578 u64 glx = domain->glx;
1579 u64 tmp;
1580
1581 pte_root |= DTE_FLAG_GV;
1582 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1583
1584
1585 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1586 flags &= ~tmp;
1587
1588 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1589 flags &= ~tmp;
1590
1591
1592 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1593 pte_root |= tmp;
1594
1595 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1596 flags |= tmp;
1597
1598 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1599 flags |= tmp;
1600 }
1601
1602 flags &= ~DEV_DOMID_MASK;
1603 flags |= domain->id;
1604
1605 old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
1606 dev_table[devid].data[1] = flags;
1607 dev_table[devid].data[0] = pte_root;
1608
1609
1610
1611
1612
1613
1614 if (old_domid) {
1615 amd_iommu_flush_tlb_domid(iommu, old_domid);
1616 }
1617 }
1618
1619 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
1620 {
1621 struct dev_table_entry *dev_table = get_dev_table(iommu);
1622
1623
1624 dev_table[devid].data[0] = DTE_FLAG_V;
1625
1626 if (!amd_iommu_snp_en)
1627 dev_table[devid].data[0] |= DTE_FLAG_TV;
1628
1629 dev_table[devid].data[1] &= DTE_FLAG_MASK;
1630
1631 amd_iommu_apply_erratum_63(iommu, devid);
1632 }
1633
1634 static void do_attach(struct iommu_dev_data *dev_data,
1635 struct protection_domain *domain)
1636 {
1637 struct amd_iommu *iommu;
1638 bool ats;
1639
1640 iommu = rlookup_amd_iommu(dev_data->dev);
1641 if (!iommu)
1642 return;
1643 ats = dev_data->ats.enabled;
1644
1645
1646 dev_data->domain = domain;
1647 list_add(&dev_data->list, &domain->dev_list);
1648
1649
1650 domain->dev_iommu[iommu->index] += 1;
1651 domain->dev_cnt += 1;
1652
1653
1654 set_dte_entry(iommu, dev_data->devid, domain,
1655 ats, dev_data->iommu_v2);
1656 clone_aliases(iommu, dev_data->dev);
1657
1658 device_flush_dte(dev_data);
1659 }
1660
1661 static void do_detach(struct iommu_dev_data *dev_data)
1662 {
1663 struct protection_domain *domain = dev_data->domain;
1664 struct amd_iommu *iommu;
1665
1666 iommu = rlookup_amd_iommu(dev_data->dev);
1667 if (!iommu)
1668 return;
1669
1670
1671 dev_data->domain = NULL;
1672 list_del(&dev_data->list);
1673 clear_dte_entry(iommu, dev_data->devid);
1674 clone_aliases(iommu, dev_data->dev);
1675
1676
1677 device_flush_dte(dev_data);
1678
1679
1680 amd_iommu_domain_flush_tlb_pde(domain);
1681
1682
1683 amd_iommu_domain_flush_complete(domain);
1684
1685
1686 domain->dev_iommu[iommu->index] -= 1;
1687 domain->dev_cnt -= 1;
1688 }
1689
1690 static void pdev_iommuv2_disable(struct pci_dev *pdev)
1691 {
1692 pci_disable_ats(pdev);
1693 pci_disable_pri(pdev);
1694 pci_disable_pasid(pdev);
1695 }
1696
1697 static int pdev_iommuv2_enable(struct pci_dev *pdev)
1698 {
1699 int ret;
1700
1701
1702 ret = pci_enable_pasid(pdev, 0);
1703 if (ret)
1704 goto out_err;
1705
1706
1707 ret = pci_reset_pri(pdev);
1708 if (ret)
1709 goto out_err;
1710
1711
1712
1713 ret = pci_enable_pri(pdev, 32);
1714 if (ret)
1715 goto out_err;
1716
1717 ret = pci_enable_ats(pdev, PAGE_SHIFT);
1718 if (ret)
1719 goto out_err;
1720
1721 return 0;
1722
1723 out_err:
1724 pci_disable_pri(pdev);
1725 pci_disable_pasid(pdev);
1726
1727 return ret;
1728 }
1729
1730
1731
1732
1733
1734 static int attach_device(struct device *dev,
1735 struct protection_domain *domain)
1736 {
1737 struct iommu_dev_data *dev_data;
1738 struct pci_dev *pdev;
1739 unsigned long flags;
1740 int ret;
1741
1742 spin_lock_irqsave(&domain->lock, flags);
1743
1744 dev_data = dev_iommu_priv_get(dev);
1745
1746 spin_lock(&dev_data->lock);
1747
1748 ret = -EBUSY;
1749 if (dev_data->domain != NULL)
1750 goto out;
1751
1752 if (!dev_is_pci(dev))
1753 goto skip_ats_check;
1754
1755 pdev = to_pci_dev(dev);
1756 if (domain->flags & PD_IOMMUV2_MASK) {
1757 struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
1758
1759 ret = -EINVAL;
1760 if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
1761 goto out;
1762
1763 if (dev_data->iommu_v2) {
1764 if (pdev_iommuv2_enable(pdev) != 0)
1765 goto out;
1766
1767 dev_data->ats.enabled = true;
1768 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1769 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
1770 }
1771 } else if (amd_iommu_iotlb_sup &&
1772 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
1773 dev_data->ats.enabled = true;
1774 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1775 }
1776
1777 skip_ats_check:
1778 ret = 0;
1779
1780 do_attach(dev_data, domain);
1781
1782
1783
1784
1785
1786
1787 amd_iommu_domain_flush_tlb_pde(domain);
1788
1789 amd_iommu_domain_flush_complete(domain);
1790
1791 out:
1792 spin_unlock(&dev_data->lock);
1793
1794 spin_unlock_irqrestore(&domain->lock, flags);
1795
1796 return ret;
1797 }
1798
1799
1800
1801
1802 static void detach_device(struct device *dev)
1803 {
1804 struct protection_domain *domain;
1805 struct iommu_dev_data *dev_data;
1806 unsigned long flags;
1807
1808 dev_data = dev_iommu_priv_get(dev);
1809 domain = dev_data->domain;
1810
1811 spin_lock_irqsave(&domain->lock, flags);
1812
1813 spin_lock(&dev_data->lock);
1814
1815
1816
1817
1818
1819
1820
1821 if (WARN_ON(!dev_data->domain))
1822 goto out;
1823
1824 do_detach(dev_data);
1825
1826 if (!dev_is_pci(dev))
1827 goto out;
1828
1829 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
1830 pdev_iommuv2_disable(to_pci_dev(dev));
1831 else if (dev_data->ats.enabled)
1832 pci_disable_ats(to_pci_dev(dev));
1833
1834 dev_data->ats.enabled = false;
1835
1836 out:
1837 spin_unlock(&dev_data->lock);
1838
1839 spin_unlock_irqrestore(&domain->lock, flags);
1840 }
1841
1842 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
1843 {
1844 struct iommu_device *iommu_dev;
1845 struct amd_iommu *iommu;
1846 int ret;
1847
1848 if (!check_device(dev))
1849 return ERR_PTR(-ENODEV);
1850
1851 iommu = rlookup_amd_iommu(dev);
1852 if (!iommu)
1853 return ERR_PTR(-ENODEV);
1854
1855 if (dev_iommu_priv_get(dev))
1856 return &iommu->iommu;
1857
1858 ret = iommu_init_device(iommu, dev);
1859 if (ret) {
1860 if (ret != -ENOTSUPP)
1861 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
1862 iommu_dev = ERR_PTR(ret);
1863 iommu_ignore_device(iommu, dev);
1864 } else {
1865 amd_iommu_set_pci_msi_domain(dev, iommu);
1866 iommu_dev = &iommu->iommu;
1867 }
1868
1869 iommu_completion_wait(iommu);
1870
1871 return iommu_dev;
1872 }
1873
1874 static void amd_iommu_probe_finalize(struct device *dev)
1875 {
1876
1877 set_dma_ops(dev, NULL);
1878 iommu_setup_dma_ops(dev, 0, U64_MAX);
1879 }
1880
1881 static void amd_iommu_release_device(struct device *dev)
1882 {
1883 struct amd_iommu *iommu;
1884
1885 if (!check_device(dev))
1886 return;
1887
1888 iommu = rlookup_amd_iommu(dev);
1889 if (!iommu)
1890 return;
1891
1892 amd_iommu_uninit_device(dev);
1893 iommu_completion_wait(iommu);
1894 }
1895
1896 static struct iommu_group *amd_iommu_device_group(struct device *dev)
1897 {
1898 if (dev_is_pci(dev))
1899 return pci_device_group(dev);
1900
1901 return acpihid_device_group(dev);
1902 }
1903
1904
1905
1906
1907
1908
1909
1910 static void update_device_table(struct protection_domain *domain)
1911 {
1912 struct iommu_dev_data *dev_data;
1913
1914 list_for_each_entry(dev_data, &domain->dev_list, list) {
1915 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
1916
1917 if (!iommu)
1918 continue;
1919 set_dte_entry(iommu, dev_data->devid, domain,
1920 dev_data->ats.enabled, dev_data->iommu_v2);
1921 clone_aliases(iommu, dev_data->dev);
1922 }
1923 }
1924
1925 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
1926 {
1927 update_device_table(domain);
1928 domain_flush_devices(domain);
1929 }
1930
1931 void amd_iommu_domain_update(struct protection_domain *domain)
1932 {
1933
1934 amd_iommu_update_and_flush_device_table(domain);
1935
1936
1937 amd_iommu_domain_flush_tlb_pde(domain);
1938 amd_iommu_domain_flush_complete(domain);
1939 }
1940
1941 int __init amd_iommu_init_api(void)
1942 {
1943 int err;
1944
1945 err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
1946 if (err)
1947 return err;
1948 #ifdef CONFIG_ARM_AMBA
1949 err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
1950 if (err)
1951 return err;
1952 #endif
1953 err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
1954 if (err)
1955 return err;
1956
1957 return 0;
1958 }
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970 static void cleanup_domain(struct protection_domain *domain)
1971 {
1972 struct iommu_dev_data *entry;
1973 unsigned long flags;
1974
1975 spin_lock_irqsave(&domain->lock, flags);
1976
1977 while (!list_empty(&domain->dev_list)) {
1978 entry = list_first_entry(&domain->dev_list,
1979 struct iommu_dev_data, list);
1980 BUG_ON(!entry->domain);
1981 do_detach(entry);
1982 }
1983
1984 spin_unlock_irqrestore(&domain->lock, flags);
1985 }
1986
1987 static void protection_domain_free(struct protection_domain *domain)
1988 {
1989 if (!domain)
1990 return;
1991
1992 if (domain->id)
1993 domain_id_free(domain->id);
1994
1995 if (domain->iop.pgtbl_cfg.tlb)
1996 free_io_pgtable_ops(&domain->iop.iop.ops);
1997
1998 kfree(domain);
1999 }
2000
2001 static int protection_domain_init_v1(struct protection_domain *domain, int mode)
2002 {
2003 u64 *pt_root = NULL;
2004
2005 BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
2006
2007 spin_lock_init(&domain->lock);
2008 domain->id = domain_id_alloc();
2009 if (!domain->id)
2010 return -ENOMEM;
2011 INIT_LIST_HEAD(&domain->dev_list);
2012
2013 if (mode != PAGE_MODE_NONE) {
2014 pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2015 if (!pt_root)
2016 return -ENOMEM;
2017 }
2018
2019 amd_iommu_domain_set_pgtable(domain, pt_root, mode);
2020
2021 return 0;
2022 }
2023
2024 static struct protection_domain *protection_domain_alloc(unsigned int type)
2025 {
2026 struct io_pgtable_ops *pgtbl_ops;
2027 struct protection_domain *domain;
2028 int pgtable = amd_iommu_pgtable;
2029 int mode = DEFAULT_PGTABLE_LEVEL;
2030 int ret;
2031
2032 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2033 if (!domain)
2034 return NULL;
2035
2036
2037
2038
2039
2040 if (type == IOMMU_DOMAIN_IDENTITY) {
2041 pgtable = AMD_IOMMU_V1;
2042 mode = PAGE_MODE_NONE;
2043 } else if (type == IOMMU_DOMAIN_UNMANAGED) {
2044 pgtable = AMD_IOMMU_V1;
2045 }
2046
2047 switch (pgtable) {
2048 case AMD_IOMMU_V1:
2049 ret = protection_domain_init_v1(domain, mode);
2050 break;
2051 default:
2052 ret = -EINVAL;
2053 }
2054
2055 if (ret)
2056 goto out_err;
2057
2058 pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
2059 if (!pgtbl_ops)
2060 goto out_err;
2061
2062 return domain;
2063 out_err:
2064 kfree(domain);
2065 return NULL;
2066 }
2067
2068 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2069 {
2070 struct protection_domain *domain;
2071
2072
2073
2074
2075
2076 if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
2077 return NULL;
2078
2079 domain = protection_domain_alloc(type);
2080 if (!domain)
2081 return NULL;
2082
2083 domain->domain.geometry.aperture_start = 0;
2084 domain->domain.geometry.aperture_end = ~0ULL;
2085 domain->domain.geometry.force_aperture = true;
2086
2087 return &domain->domain;
2088 }
2089
2090 static void amd_iommu_domain_free(struct iommu_domain *dom)
2091 {
2092 struct protection_domain *domain;
2093
2094 domain = to_pdomain(dom);
2095
2096 if (domain->dev_cnt > 0)
2097 cleanup_domain(domain);
2098
2099 BUG_ON(domain->dev_cnt != 0);
2100
2101 if (!dom)
2102 return;
2103
2104 if (domain->flags & PD_IOMMUV2_MASK)
2105 free_gcr3_table(domain);
2106
2107 protection_domain_free(domain);
2108 }
2109
2110 static void amd_iommu_detach_device(struct iommu_domain *dom,
2111 struct device *dev)
2112 {
2113 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2114 struct amd_iommu *iommu;
2115
2116 if (!check_device(dev))
2117 return;
2118
2119 if (dev_data->domain != NULL)
2120 detach_device(dev);
2121
2122 iommu = rlookup_amd_iommu(dev);
2123 if (!iommu)
2124 return;
2125
2126 #ifdef CONFIG_IRQ_REMAP
2127 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2128 (dom->type == IOMMU_DOMAIN_UNMANAGED))
2129 dev_data->use_vapic = 0;
2130 #endif
2131
2132 iommu_completion_wait(iommu);
2133 }
2134
2135 static int amd_iommu_attach_device(struct iommu_domain *dom,
2136 struct device *dev)
2137 {
2138 struct protection_domain *domain = to_pdomain(dom);
2139 struct iommu_dev_data *dev_data;
2140 struct amd_iommu *iommu;
2141 int ret;
2142
2143 if (!check_device(dev))
2144 return -EINVAL;
2145
2146 dev_data = dev_iommu_priv_get(dev);
2147 dev_data->defer_attach = false;
2148
2149 iommu = rlookup_amd_iommu(dev);
2150 if (!iommu)
2151 return -EINVAL;
2152
2153 if (dev_data->domain)
2154 detach_device(dev);
2155
2156 ret = attach_device(dev, domain);
2157
2158 #ifdef CONFIG_IRQ_REMAP
2159 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2160 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2161 dev_data->use_vapic = 1;
2162 else
2163 dev_data->use_vapic = 0;
2164 }
2165 #endif
2166
2167 iommu_completion_wait(iommu);
2168
2169 return ret;
2170 }
2171
2172 static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2173 unsigned long iova, size_t size)
2174 {
2175 struct protection_domain *domain = to_pdomain(dom);
2176 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2177
2178 if (ops->map)
2179 domain_flush_np_cache(domain, iova, size);
2180 }
2181
2182 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2183 phys_addr_t paddr, size_t page_size, int iommu_prot,
2184 gfp_t gfp)
2185 {
2186 struct protection_domain *domain = to_pdomain(dom);
2187 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2188 int prot = 0;
2189 int ret = -EINVAL;
2190
2191 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2192 (domain->iop.mode == PAGE_MODE_NONE))
2193 return -EINVAL;
2194
2195 if (iommu_prot & IOMMU_READ)
2196 prot |= IOMMU_PROT_IR;
2197 if (iommu_prot & IOMMU_WRITE)
2198 prot |= IOMMU_PROT_IW;
2199
2200 if (ops->map)
2201 ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
2202
2203 return ret;
2204 }
2205
2206 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
2207 struct iommu_iotlb_gather *gather,
2208 unsigned long iova, size_t size)
2209 {
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220 if (amd_iommu_np_cache &&
2221 iommu_iotlb_gather_is_disjoint(gather, iova, size))
2222 iommu_iotlb_sync(domain, gather);
2223
2224 iommu_iotlb_gather_add_range(gather, iova, size);
2225 }
2226
2227 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2228 size_t page_size,
2229 struct iommu_iotlb_gather *gather)
2230 {
2231 struct protection_domain *domain = to_pdomain(dom);
2232 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2233 size_t r;
2234
2235 if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
2236 (domain->iop.mode == PAGE_MODE_NONE))
2237 return 0;
2238
2239 r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
2240
2241 amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
2242
2243 return r;
2244 }
2245
2246 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2247 dma_addr_t iova)
2248 {
2249 struct protection_domain *domain = to_pdomain(dom);
2250 struct io_pgtable_ops *ops = &domain->iop.iop.ops;
2251
2252 return ops->iova_to_phys(ops, iova);
2253 }
2254
2255 static bool amd_iommu_capable(enum iommu_cap cap)
2256 {
2257 switch (cap) {
2258 case IOMMU_CAP_CACHE_COHERENCY:
2259 return true;
2260 case IOMMU_CAP_INTR_REMAP:
2261 return (irq_remapping_enabled == 1);
2262 case IOMMU_CAP_NOEXEC:
2263 return false;
2264 case IOMMU_CAP_PRE_BOOT_PROTECTION:
2265 return amdr_ivrs_remap_support;
2266 default:
2267 break;
2268 }
2269
2270 return false;
2271 }
2272
2273 static void amd_iommu_get_resv_regions(struct device *dev,
2274 struct list_head *head)
2275 {
2276 struct iommu_resv_region *region;
2277 struct unity_map_entry *entry;
2278 struct amd_iommu *iommu;
2279 struct amd_iommu_pci_seg *pci_seg;
2280 int devid, sbdf;
2281
2282 sbdf = get_device_sbdf_id(dev);
2283 if (sbdf < 0)
2284 return;
2285
2286 devid = PCI_SBDF_TO_DEVID(sbdf);
2287 iommu = rlookup_amd_iommu(dev);
2288 if (!iommu)
2289 return;
2290 pci_seg = iommu->pci_seg;
2291
2292 list_for_each_entry(entry, &pci_seg->unity_map, list) {
2293 int type, prot = 0;
2294 size_t length;
2295
2296 if (devid < entry->devid_start || devid > entry->devid_end)
2297 continue;
2298
2299 type = IOMMU_RESV_DIRECT;
2300 length = entry->address_end - entry->address_start;
2301 if (entry->prot & IOMMU_PROT_IR)
2302 prot |= IOMMU_READ;
2303 if (entry->prot & IOMMU_PROT_IW)
2304 prot |= IOMMU_WRITE;
2305 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2306
2307 type = IOMMU_RESV_RESERVED;
2308
2309 region = iommu_alloc_resv_region(entry->address_start,
2310 length, prot, type);
2311 if (!region) {
2312 dev_err(dev, "Out of memory allocating dm-regions\n");
2313 return;
2314 }
2315 list_add_tail(®ion->list, head);
2316 }
2317
2318 region = iommu_alloc_resv_region(MSI_RANGE_START,
2319 MSI_RANGE_END - MSI_RANGE_START + 1,
2320 0, IOMMU_RESV_MSI);
2321 if (!region)
2322 return;
2323 list_add_tail(®ion->list, head);
2324
2325 region = iommu_alloc_resv_region(HT_RANGE_START,
2326 HT_RANGE_END - HT_RANGE_START + 1,
2327 0, IOMMU_RESV_RESERVED);
2328 if (!region)
2329 return;
2330 list_add_tail(®ion->list, head);
2331 }
2332
2333 bool amd_iommu_is_attach_deferred(struct device *dev)
2334 {
2335 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2336
2337 return dev_data->defer_attach;
2338 }
2339 EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
2340
2341 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2342 {
2343 struct protection_domain *dom = to_pdomain(domain);
2344 unsigned long flags;
2345
2346 spin_lock_irqsave(&dom->lock, flags);
2347 amd_iommu_domain_flush_tlb_pde(dom);
2348 amd_iommu_domain_flush_complete(dom);
2349 spin_unlock_irqrestore(&dom->lock, flags);
2350 }
2351
2352 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2353 struct iommu_iotlb_gather *gather)
2354 {
2355 struct protection_domain *dom = to_pdomain(domain);
2356 unsigned long flags;
2357
2358 spin_lock_irqsave(&dom->lock, flags);
2359 domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
2360 amd_iommu_domain_flush_complete(dom);
2361 spin_unlock_irqrestore(&dom->lock, flags);
2362 }
2363
2364 static int amd_iommu_def_domain_type(struct device *dev)
2365 {
2366 struct iommu_dev_data *dev_data;
2367
2368 dev_data = dev_iommu_priv_get(dev);
2369 if (!dev_data)
2370 return 0;
2371
2372
2373
2374
2375
2376
2377 if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
2378 return IOMMU_DOMAIN_IDENTITY;
2379
2380 return 0;
2381 }
2382
2383 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
2384 {
2385
2386 return true;
2387 }
2388
2389 const struct iommu_ops amd_iommu_ops = {
2390 .capable = amd_iommu_capable,
2391 .domain_alloc = amd_iommu_domain_alloc,
2392 .probe_device = amd_iommu_probe_device,
2393 .release_device = amd_iommu_release_device,
2394 .probe_finalize = amd_iommu_probe_finalize,
2395 .device_group = amd_iommu_device_group,
2396 .get_resv_regions = amd_iommu_get_resv_regions,
2397 .is_attach_deferred = amd_iommu_is_attach_deferred,
2398 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
2399 .def_domain_type = amd_iommu_def_domain_type,
2400 .default_domain_ops = &(const struct iommu_domain_ops) {
2401 .attach_dev = amd_iommu_attach_device,
2402 .detach_dev = amd_iommu_detach_device,
2403 .map = amd_iommu_map,
2404 .unmap = amd_iommu_unmap,
2405 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2406 .iova_to_phys = amd_iommu_iova_to_phys,
2407 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2408 .iotlb_sync = amd_iommu_iotlb_sync,
2409 .free = amd_iommu_domain_free,
2410 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2411 }
2412 };
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
2426 {
2427 return atomic_notifier_chain_register(&ppr_notifier, nb);
2428 }
2429 EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
2430
2431 int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
2432 {
2433 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
2434 }
2435 EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
2436
2437 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
2438 {
2439 struct protection_domain *domain = to_pdomain(dom);
2440 unsigned long flags;
2441
2442 spin_lock_irqsave(&domain->lock, flags);
2443
2444 if (domain->iop.pgtbl_cfg.tlb)
2445 free_io_pgtable_ops(&domain->iop.iop.ops);
2446
2447 spin_unlock_irqrestore(&domain->lock, flags);
2448 }
2449 EXPORT_SYMBOL(amd_iommu_domain_direct_map);
2450
2451 int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
2452 {
2453 struct protection_domain *domain = to_pdomain(dom);
2454 unsigned long flags;
2455 int levels, ret;
2456
2457
2458 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
2459 levels += 1;
2460
2461 if (levels > amd_iommu_max_glx_val)
2462 return -EINVAL;
2463
2464 spin_lock_irqsave(&domain->lock, flags);
2465
2466
2467
2468
2469
2470
2471 ret = -EBUSY;
2472 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
2473 goto out;
2474
2475 ret = -ENOMEM;
2476 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
2477 if (domain->gcr3_tbl == NULL)
2478 goto out;
2479
2480 domain->glx = levels;
2481 domain->flags |= PD_IOMMUV2_MASK;
2482
2483 amd_iommu_domain_update(domain);
2484
2485 ret = 0;
2486
2487 out:
2488 spin_unlock_irqrestore(&domain->lock, flags);
2489
2490 return ret;
2491 }
2492 EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
2493
2494 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
2495 u64 address, bool size)
2496 {
2497 struct iommu_dev_data *dev_data;
2498 struct iommu_cmd cmd;
2499 int i, ret;
2500
2501 if (!(domain->flags & PD_IOMMUV2_MASK))
2502 return -EINVAL;
2503
2504 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
2505
2506
2507
2508
2509
2510 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
2511 if (domain->dev_iommu[i] == 0)
2512 continue;
2513
2514 ret = iommu_queue_command(amd_iommus[i], &cmd);
2515 if (ret != 0)
2516 goto out;
2517 }
2518
2519
2520 amd_iommu_domain_flush_complete(domain);
2521
2522
2523 list_for_each_entry(dev_data, &domain->dev_list, list) {
2524 struct amd_iommu *iommu;
2525 int qdep;
2526
2527
2528
2529
2530
2531 if (!dev_data->ats.enabled)
2532 continue;
2533
2534 qdep = dev_data->ats.qdep;
2535 iommu = rlookup_amd_iommu(dev_data->dev);
2536 if (!iommu)
2537 continue;
2538 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
2539 qdep, address, size);
2540
2541 ret = iommu_queue_command(iommu, &cmd);
2542 if (ret != 0)
2543 goto out;
2544 }
2545
2546
2547 amd_iommu_domain_flush_complete(domain);
2548
2549 ret = 0;
2550
2551 out:
2552
2553 return ret;
2554 }
2555
2556 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
2557 u64 address)
2558 {
2559 return __flush_pasid(domain, pasid, address, false);
2560 }
2561
2562 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
2563 u64 address)
2564 {
2565 struct protection_domain *domain = to_pdomain(dom);
2566 unsigned long flags;
2567 int ret;
2568
2569 spin_lock_irqsave(&domain->lock, flags);
2570 ret = __amd_iommu_flush_page(domain, pasid, address);
2571 spin_unlock_irqrestore(&domain->lock, flags);
2572
2573 return ret;
2574 }
2575 EXPORT_SYMBOL(amd_iommu_flush_page);
2576
2577 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
2578 {
2579 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
2580 true);
2581 }
2582
2583 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
2584 {
2585 struct protection_domain *domain = to_pdomain(dom);
2586 unsigned long flags;
2587 int ret;
2588
2589 spin_lock_irqsave(&domain->lock, flags);
2590 ret = __amd_iommu_flush_tlb(domain, pasid);
2591 spin_unlock_irqrestore(&domain->lock, flags);
2592
2593 return ret;
2594 }
2595 EXPORT_SYMBOL(amd_iommu_flush_tlb);
2596
2597 static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
2598 {
2599 int index;
2600 u64 *pte;
2601
2602 while (true) {
2603
2604 index = (pasid >> (9 * level)) & 0x1ff;
2605 pte = &root[index];
2606
2607 if (level == 0)
2608 break;
2609
2610 if (!(*pte & GCR3_VALID)) {
2611 if (!alloc)
2612 return NULL;
2613
2614 root = (void *)get_zeroed_page(GFP_ATOMIC);
2615 if (root == NULL)
2616 return NULL;
2617
2618 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
2619 }
2620
2621 root = iommu_phys_to_virt(*pte & PAGE_MASK);
2622
2623 level -= 1;
2624 }
2625
2626 return pte;
2627 }
2628
2629 static int __set_gcr3(struct protection_domain *domain, u32 pasid,
2630 unsigned long cr3)
2631 {
2632 u64 *pte;
2633
2634 if (domain->iop.mode != PAGE_MODE_NONE)
2635 return -EINVAL;
2636
2637 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
2638 if (pte == NULL)
2639 return -ENOMEM;
2640
2641 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
2642
2643 return __amd_iommu_flush_tlb(domain, pasid);
2644 }
2645
2646 static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
2647 {
2648 u64 *pte;
2649
2650 if (domain->iop.mode != PAGE_MODE_NONE)
2651 return -EINVAL;
2652
2653 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
2654 if (pte == NULL)
2655 return 0;
2656
2657 *pte = 0;
2658
2659 return __amd_iommu_flush_tlb(domain, pasid);
2660 }
2661
2662 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
2663 unsigned long cr3)
2664 {
2665 struct protection_domain *domain = to_pdomain(dom);
2666 unsigned long flags;
2667 int ret;
2668
2669 spin_lock_irqsave(&domain->lock, flags);
2670 ret = __set_gcr3(domain, pasid, cr3);
2671 spin_unlock_irqrestore(&domain->lock, flags);
2672
2673 return ret;
2674 }
2675 EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
2676
2677 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
2678 {
2679 struct protection_domain *domain = to_pdomain(dom);
2680 unsigned long flags;
2681 int ret;
2682
2683 spin_lock_irqsave(&domain->lock, flags);
2684 ret = __clear_gcr3(domain, pasid);
2685 spin_unlock_irqrestore(&domain->lock, flags);
2686
2687 return ret;
2688 }
2689 EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
2690
2691 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
2692 int status, int tag)
2693 {
2694 struct iommu_dev_data *dev_data;
2695 struct amd_iommu *iommu;
2696 struct iommu_cmd cmd;
2697
2698 dev_data = dev_iommu_priv_get(&pdev->dev);
2699 iommu = rlookup_amd_iommu(&pdev->dev);
2700 if (!iommu)
2701 return -ENODEV;
2702
2703 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
2704 tag, dev_data->pri_tlp);
2705
2706 return iommu_queue_command(iommu, &cmd);
2707 }
2708 EXPORT_SYMBOL(amd_iommu_complete_ppr);
2709
2710 int amd_iommu_device_info(struct pci_dev *pdev,
2711 struct amd_iommu_device_info *info)
2712 {
2713 int max_pasids;
2714 int pos;
2715
2716 if (pdev == NULL || info == NULL)
2717 return -EINVAL;
2718
2719 if (!amd_iommu_v2_supported())
2720 return -EINVAL;
2721
2722 memset(info, 0, sizeof(*info));
2723
2724 if (pci_ats_supported(pdev))
2725 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
2726
2727 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2728 if (pos)
2729 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
2730
2731 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
2732 if (pos) {
2733 int features;
2734
2735 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
2736 max_pasids = min(max_pasids, (1 << 20));
2737
2738 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
2739 info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
2740
2741 features = pci_pasid_features(pdev);
2742 if (features & PCI_PASID_CAP_EXEC)
2743 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
2744 if (features & PCI_PASID_CAP_PRIV)
2745 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
2746 }
2747
2748 return 0;
2749 }
2750 EXPORT_SYMBOL(amd_iommu_device_info);
2751
2752 #ifdef CONFIG_IRQ_REMAP
2753
2754
2755
2756
2757
2758
2759
2760 static struct irq_chip amd_ir_chip;
2761 static DEFINE_SPINLOCK(iommu_table_lock);
2762
2763 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
2764 struct irq_remap_table *table)
2765 {
2766 u64 dte;
2767 struct dev_table_entry *dev_table = get_dev_table(iommu);
2768
2769 dte = dev_table[devid].data[2];
2770 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
2771 dte |= iommu_virt_to_phys(table->table);
2772 dte |= DTE_IRQ_REMAP_INTCTL;
2773 dte |= DTE_INTTABLEN;
2774 dte |= DTE_IRQ_REMAP_ENABLE;
2775
2776 dev_table[devid].data[2] = dte;
2777 }
2778
2779 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
2780 {
2781 struct irq_remap_table *table;
2782 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2783
2784 if (WARN_ONCE(!pci_seg->rlookup_table[devid],
2785 "%s: no iommu for devid %x:%x\n",
2786 __func__, pci_seg->id, devid))
2787 return NULL;
2788
2789 table = pci_seg->irq_lookup_table[devid];
2790 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
2791 __func__, pci_seg->id, devid))
2792 return NULL;
2793
2794 return table;
2795 }
2796
2797 static struct irq_remap_table *__alloc_irq_table(void)
2798 {
2799 struct irq_remap_table *table;
2800
2801 table = kzalloc(sizeof(*table), GFP_KERNEL);
2802 if (!table)
2803 return NULL;
2804
2805 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
2806 if (!table->table) {
2807 kfree(table);
2808 return NULL;
2809 }
2810 raw_spin_lock_init(&table->lock);
2811
2812 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2813 memset(table->table, 0,
2814 MAX_IRQS_PER_TABLE * sizeof(u32));
2815 else
2816 memset(table->table, 0,
2817 (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
2818 return table;
2819 }
2820
2821 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2822 struct irq_remap_table *table)
2823 {
2824 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2825
2826 pci_seg->irq_lookup_table[devid] = table;
2827 set_dte_irq_entry(iommu, devid, table);
2828 iommu_flush_dte(iommu, devid);
2829 }
2830
2831 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
2832 void *data)
2833 {
2834 struct irq_remap_table *table = data;
2835 struct amd_iommu_pci_seg *pci_seg;
2836 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
2837
2838 if (!iommu)
2839 return -EINVAL;
2840
2841 pci_seg = iommu->pci_seg;
2842 pci_seg->irq_lookup_table[alias] = table;
2843 set_dte_irq_entry(iommu, alias, table);
2844 iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
2845
2846 return 0;
2847 }
2848
2849 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
2850 u16 devid, struct pci_dev *pdev)
2851 {
2852 struct irq_remap_table *table = NULL;
2853 struct irq_remap_table *new_table = NULL;
2854 struct amd_iommu_pci_seg *pci_seg;
2855 unsigned long flags;
2856 u16 alias;
2857
2858 spin_lock_irqsave(&iommu_table_lock, flags);
2859
2860 pci_seg = iommu->pci_seg;
2861 table = pci_seg->irq_lookup_table[devid];
2862 if (table)
2863 goto out_unlock;
2864
2865 alias = pci_seg->alias_table[devid];
2866 table = pci_seg->irq_lookup_table[alias];
2867 if (table) {
2868 set_remap_table_entry(iommu, devid, table);
2869 goto out_wait;
2870 }
2871 spin_unlock_irqrestore(&iommu_table_lock, flags);
2872
2873
2874 new_table = __alloc_irq_table();
2875 if (!new_table)
2876 return NULL;
2877
2878 spin_lock_irqsave(&iommu_table_lock, flags);
2879
2880 table = pci_seg->irq_lookup_table[devid];
2881 if (table)
2882 goto out_unlock;
2883
2884 table = pci_seg->irq_lookup_table[alias];
2885 if (table) {
2886 set_remap_table_entry(iommu, devid, table);
2887 goto out_wait;
2888 }
2889
2890 table = new_table;
2891 new_table = NULL;
2892
2893 if (pdev)
2894 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
2895 table);
2896 else
2897 set_remap_table_entry(iommu, devid, table);
2898
2899 if (devid != alias)
2900 set_remap_table_entry(iommu, alias, table);
2901
2902 out_wait:
2903 iommu_completion_wait(iommu);
2904
2905 out_unlock:
2906 spin_unlock_irqrestore(&iommu_table_lock, flags);
2907
2908 if (new_table) {
2909 kmem_cache_free(amd_iommu_irq_cache, new_table->table);
2910 kfree(new_table);
2911 }
2912 return table;
2913 }
2914
2915 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
2916 bool align, struct pci_dev *pdev)
2917 {
2918 struct irq_remap_table *table;
2919 int index, c, alignment = 1;
2920 unsigned long flags;
2921
2922 table = alloc_irq_table(iommu, devid, pdev);
2923 if (!table)
2924 return -ENODEV;
2925
2926 if (align)
2927 alignment = roundup_pow_of_two(count);
2928
2929 raw_spin_lock_irqsave(&table->lock, flags);
2930
2931
2932 for (index = ALIGN(table->min_index, alignment), c = 0;
2933 index < MAX_IRQS_PER_TABLE;) {
2934 if (!iommu->irte_ops->is_allocated(table, index)) {
2935 c += 1;
2936 } else {
2937 c = 0;
2938 index = ALIGN(index + 1, alignment);
2939 continue;
2940 }
2941
2942 if (c == count) {
2943 for (; c != 0; --c)
2944 iommu->irte_ops->set_allocated(table, index - c + 1);
2945
2946 index -= count - 1;
2947 goto out;
2948 }
2949
2950 index++;
2951 }
2952
2953 index = -ENOSPC;
2954
2955 out:
2956 raw_spin_unlock_irqrestore(&table->lock, flags);
2957
2958 return index;
2959 }
2960
2961 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
2962 struct irte_ga *irte, struct amd_ir_data *data)
2963 {
2964 bool ret;
2965 struct irq_remap_table *table;
2966 unsigned long flags;
2967 struct irte_ga *entry;
2968
2969 table = get_irq_table(iommu, devid);
2970 if (!table)
2971 return -ENOMEM;
2972
2973 raw_spin_lock_irqsave(&table->lock, flags);
2974
2975 entry = (struct irte_ga *)table->table;
2976 entry = &entry[index];
2977
2978 ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
2979 entry->lo.val, entry->hi.val,
2980 irte->lo.val, irte->hi.val);
2981
2982
2983
2984
2985
2986
2987 WARN_ON(!ret);
2988
2989 if (data)
2990 data->ref = entry;
2991
2992 raw_spin_unlock_irqrestore(&table->lock, flags);
2993
2994 iommu_flush_irt(iommu, devid);
2995 iommu_completion_wait(iommu);
2996
2997 return 0;
2998 }
2999
3000 static int modify_irte(struct amd_iommu *iommu,
3001 u16 devid, int index, union irte *irte)
3002 {
3003 struct irq_remap_table *table;
3004 unsigned long flags;
3005
3006 table = get_irq_table(iommu, devid);
3007 if (!table)
3008 return -ENOMEM;
3009
3010 raw_spin_lock_irqsave(&table->lock, flags);
3011 table->table[index] = irte->val;
3012 raw_spin_unlock_irqrestore(&table->lock, flags);
3013
3014 iommu_flush_irt(iommu, devid);
3015 iommu_completion_wait(iommu);
3016
3017 return 0;
3018 }
3019
3020 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3021 {
3022 struct irq_remap_table *table;
3023 unsigned long flags;
3024
3025 table = get_irq_table(iommu, devid);
3026 if (!table)
3027 return;
3028
3029 raw_spin_lock_irqsave(&table->lock, flags);
3030 iommu->irte_ops->clear_allocated(table, index);
3031 raw_spin_unlock_irqrestore(&table->lock, flags);
3032
3033 iommu_flush_irt(iommu, devid);
3034 iommu_completion_wait(iommu);
3035 }
3036
3037 static void irte_prepare(void *entry,
3038 u32 delivery_mode, bool dest_mode,
3039 u8 vector, u32 dest_apicid, int devid)
3040 {
3041 union irte *irte = (union irte *) entry;
3042
3043 irte->val = 0;
3044 irte->fields.vector = vector;
3045 irte->fields.int_type = delivery_mode;
3046 irte->fields.destination = dest_apicid;
3047 irte->fields.dm = dest_mode;
3048 irte->fields.valid = 1;
3049 }
3050
3051 static void irte_ga_prepare(void *entry,
3052 u32 delivery_mode, bool dest_mode,
3053 u8 vector, u32 dest_apicid, int devid)
3054 {
3055 struct irte_ga *irte = (struct irte_ga *) entry;
3056
3057 irte->lo.val = 0;
3058 irte->hi.val = 0;
3059 irte->lo.fields_remap.int_type = delivery_mode;
3060 irte->lo.fields_remap.dm = dest_mode;
3061 irte->hi.fields.vector = vector;
3062 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3063 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
3064 irte->lo.fields_remap.valid = 1;
3065 }
3066
3067 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3068 {
3069 union irte *irte = (union irte *) entry;
3070
3071 irte->fields.valid = 1;
3072 modify_irte(iommu, devid, index, irte);
3073 }
3074
3075 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3076 {
3077 struct irte_ga *irte = (struct irte_ga *) entry;
3078
3079 irte->lo.fields_remap.valid = 1;
3080 modify_irte_ga(iommu, devid, index, irte, NULL);
3081 }
3082
3083 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3084 {
3085 union irte *irte = (union irte *) entry;
3086
3087 irte->fields.valid = 0;
3088 modify_irte(iommu, devid, index, irte);
3089 }
3090
3091 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3092 {
3093 struct irte_ga *irte = (struct irte_ga *) entry;
3094
3095 irte->lo.fields_remap.valid = 0;
3096 modify_irte_ga(iommu, devid, index, irte, NULL);
3097 }
3098
3099 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3100 u8 vector, u32 dest_apicid)
3101 {
3102 union irte *irte = (union irte *) entry;
3103
3104 irte->fields.vector = vector;
3105 irte->fields.destination = dest_apicid;
3106 modify_irte(iommu, devid, index, irte);
3107 }
3108
3109 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3110 u8 vector, u32 dest_apicid)
3111 {
3112 struct irte_ga *irte = (struct irte_ga *) entry;
3113
3114 if (!irte->lo.fields_remap.guest_mode) {
3115 irte->hi.fields.vector = vector;
3116 irte->lo.fields_remap.destination =
3117 APICID_TO_IRTE_DEST_LO(dest_apicid);
3118 irte->hi.fields.destination =
3119 APICID_TO_IRTE_DEST_HI(dest_apicid);
3120 modify_irte_ga(iommu, devid, index, irte, NULL);
3121 }
3122 }
3123
3124 #define IRTE_ALLOCATED (~1U)
3125 static void irte_set_allocated(struct irq_remap_table *table, int index)
3126 {
3127 table->table[index] = IRTE_ALLOCATED;
3128 }
3129
3130 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3131 {
3132 struct irte_ga *ptr = (struct irte_ga *)table->table;
3133 struct irte_ga *irte = &ptr[index];
3134
3135 memset(&irte->lo.val, 0, sizeof(u64));
3136 memset(&irte->hi.val, 0, sizeof(u64));
3137 irte->hi.fields.vector = 0xff;
3138 }
3139
3140 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3141 {
3142 union irte *ptr = (union irte *)table->table;
3143 union irte *irte = &ptr[index];
3144
3145 return irte->val != 0;
3146 }
3147
3148 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3149 {
3150 struct irte_ga *ptr = (struct irte_ga *)table->table;
3151 struct irte_ga *irte = &ptr[index];
3152
3153 return irte->hi.fields.vector != 0;
3154 }
3155
3156 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3157 {
3158 table->table[index] = 0;
3159 }
3160
3161 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3162 {
3163 struct irte_ga *ptr = (struct irte_ga *)table->table;
3164 struct irte_ga *irte = &ptr[index];
3165
3166 memset(&irte->lo.val, 0, sizeof(u64));
3167 memset(&irte->hi.val, 0, sizeof(u64));
3168 }
3169
3170 static int get_devid(struct irq_alloc_info *info)
3171 {
3172 switch (info->type) {
3173 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3174 return get_ioapic_devid(info->devid);
3175 case X86_IRQ_ALLOC_TYPE_HPET:
3176 return get_hpet_devid(info->devid);
3177 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3178 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3179 return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3180 default:
3181 WARN_ON_ONCE(1);
3182 return -1;
3183 }
3184 }
3185
3186 struct irq_remap_ops amd_iommu_irq_ops = {
3187 .prepare = amd_iommu_prepare,
3188 .enable = amd_iommu_enable,
3189 .disable = amd_iommu_disable,
3190 .reenable = amd_iommu_reenable,
3191 .enable_faulting = amd_iommu_enable_faulting,
3192 };
3193
3194 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3195 {
3196 msg->data = index;
3197 msg->address_lo = 0;
3198 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3199 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3200 }
3201
3202 static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3203 struct irq_cfg *irq_cfg,
3204 struct irq_alloc_info *info,
3205 int devid, int index, int sub_handle)
3206 {
3207 struct irq_2_irte *irte_info = &data->irq_2_irte;
3208 struct amd_iommu *iommu = data->iommu;
3209
3210 if (!iommu)
3211 return;
3212
3213 data->irq_2_irte.devid = devid;
3214 data->irq_2_irte.index = index + sub_handle;
3215 iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
3216 apic->dest_mode_logical, irq_cfg->vector,
3217 irq_cfg->dest_apicid, devid);
3218
3219 switch (info->type) {
3220 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3221 case X86_IRQ_ALLOC_TYPE_HPET:
3222 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3223 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3224 fill_msi_msg(&data->msi_entry, irte_info->index);
3225 break;
3226
3227 default:
3228 BUG_ON(1);
3229 break;
3230 }
3231 }
3232
3233 struct amd_irte_ops irte_32_ops = {
3234 .prepare = irte_prepare,
3235 .activate = irte_activate,
3236 .deactivate = irte_deactivate,
3237 .set_affinity = irte_set_affinity,
3238 .set_allocated = irte_set_allocated,
3239 .is_allocated = irte_is_allocated,
3240 .clear_allocated = irte_clear_allocated,
3241 };
3242
3243 struct amd_irte_ops irte_128_ops = {
3244 .prepare = irte_ga_prepare,
3245 .activate = irte_ga_activate,
3246 .deactivate = irte_ga_deactivate,
3247 .set_affinity = irte_ga_set_affinity,
3248 .set_allocated = irte_ga_set_allocated,
3249 .is_allocated = irte_ga_is_allocated,
3250 .clear_allocated = irte_ga_clear_allocated,
3251 };
3252
3253 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3254 unsigned int nr_irqs, void *arg)
3255 {
3256 struct irq_alloc_info *info = arg;
3257 struct irq_data *irq_data;
3258 struct amd_ir_data *data = NULL;
3259 struct amd_iommu *iommu;
3260 struct irq_cfg *cfg;
3261 int i, ret, devid, seg, sbdf;
3262 int index;
3263
3264 if (!info)
3265 return -EINVAL;
3266 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
3267 info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
3268 return -EINVAL;
3269
3270
3271
3272
3273
3274 if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
3275 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
3276
3277 sbdf = get_devid(info);
3278 if (sbdf < 0)
3279 return -EINVAL;
3280
3281 seg = PCI_SBDF_TO_SEGID(sbdf);
3282 devid = PCI_SBDF_TO_DEVID(sbdf);
3283 iommu = __rlookup_amd_iommu(seg, devid);
3284 if (!iommu)
3285 return -EINVAL;
3286
3287 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3288 if (ret < 0)
3289 return ret;
3290
3291 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3292 struct irq_remap_table *table;
3293
3294 table = alloc_irq_table(iommu, devid, NULL);
3295 if (table) {
3296 if (!table->min_index) {
3297
3298
3299
3300
3301 table->min_index = 32;
3302 for (i = 0; i < 32; ++i)
3303 iommu->irte_ops->set_allocated(table, i);
3304 }
3305 WARN_ON(table->min_index != 32);
3306 index = info->ioapic.pin;
3307 } else {
3308 index = -ENOMEM;
3309 }
3310 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3311 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3312 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3313
3314 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3315 msi_desc_to_pci_dev(info->desc));
3316 } else {
3317 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
3318 }
3319
3320 if (index < 0) {
3321 pr_warn("Failed to allocate IRTE\n");
3322 ret = index;
3323 goto out_free_parent;
3324 }
3325
3326 for (i = 0; i < nr_irqs; i++) {
3327 irq_data = irq_domain_get_irq_data(domain, virq + i);
3328 cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3329 if (!cfg) {
3330 ret = -EINVAL;
3331 goto out_free_data;
3332 }
3333
3334 ret = -ENOMEM;
3335 data = kzalloc(sizeof(*data), GFP_KERNEL);
3336 if (!data)
3337 goto out_free_data;
3338
3339 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3340 data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3341 else
3342 data->entry = kzalloc(sizeof(struct irte_ga),
3343 GFP_KERNEL);
3344 if (!data->entry) {
3345 kfree(data);
3346 goto out_free_data;
3347 }
3348
3349 data->iommu = iommu;
3350 irq_data->hwirq = (devid << 16) + i;
3351 irq_data->chip_data = data;
3352 irq_data->chip = &amd_ir_chip;
3353 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3354 irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
3355 }
3356
3357 return 0;
3358
3359 out_free_data:
3360 for (i--; i >= 0; i--) {
3361 irq_data = irq_domain_get_irq_data(domain, virq + i);
3362 if (irq_data)
3363 kfree(irq_data->chip_data);
3364 }
3365 for (i = 0; i < nr_irqs; i++)
3366 free_irte(iommu, devid, index + i);
3367 out_free_parent:
3368 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3369 return ret;
3370 }
3371
3372 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3373 unsigned int nr_irqs)
3374 {
3375 struct irq_2_irte *irte_info;
3376 struct irq_data *irq_data;
3377 struct amd_ir_data *data;
3378 int i;
3379
3380 for (i = 0; i < nr_irqs; i++) {
3381 irq_data = irq_domain_get_irq_data(domain, virq + i);
3382 if (irq_data && irq_data->chip_data) {
3383 data = irq_data->chip_data;
3384 irte_info = &data->irq_2_irte;
3385 free_irte(data->iommu, irte_info->devid, irte_info->index);
3386 kfree(data->entry);
3387 kfree(data);
3388 }
3389 }
3390 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3391 }
3392
3393 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3394 struct amd_ir_data *ir_data,
3395 struct irq_2_irte *irte_info,
3396 struct irq_cfg *cfg);
3397
3398 static int irq_remapping_activate(struct irq_domain *domain,
3399 struct irq_data *irq_data, bool reserve)
3400 {
3401 struct amd_ir_data *data = irq_data->chip_data;
3402 struct irq_2_irte *irte_info = &data->irq_2_irte;
3403 struct amd_iommu *iommu = data->iommu;
3404 struct irq_cfg *cfg = irqd_cfg(irq_data);
3405
3406 if (!iommu)
3407 return 0;
3408
3409 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3410 irte_info->index);
3411 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3412 return 0;
3413 }
3414
3415 static void irq_remapping_deactivate(struct irq_domain *domain,
3416 struct irq_data *irq_data)
3417 {
3418 struct amd_ir_data *data = irq_data->chip_data;
3419 struct irq_2_irte *irte_info = &data->irq_2_irte;
3420 struct amd_iommu *iommu = data->iommu;
3421
3422 if (iommu)
3423 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3424 irte_info->index);
3425 }
3426
3427 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3428 enum irq_domain_bus_token bus_token)
3429 {
3430 struct amd_iommu *iommu;
3431 int devid = -1;
3432
3433 if (!amd_iommu_irq_remap)
3434 return 0;
3435
3436 if (x86_fwspec_is_ioapic(fwspec))
3437 devid = get_ioapic_devid(fwspec->param[0]);
3438 else if (x86_fwspec_is_hpet(fwspec))
3439 devid = get_hpet_devid(fwspec->param[0]);
3440
3441 if (devid < 0)
3442 return 0;
3443 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3444
3445 return iommu && iommu->ir_domain == d;
3446 }
3447
3448 static const struct irq_domain_ops amd_ir_domain_ops = {
3449 .select = irq_remapping_select,
3450 .alloc = irq_remapping_alloc,
3451 .free = irq_remapping_free,
3452 .activate = irq_remapping_activate,
3453 .deactivate = irq_remapping_deactivate,
3454 };
3455
3456 int amd_iommu_activate_guest_mode(void *data)
3457 {
3458 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3459 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3460 u64 valid;
3461
3462 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3463 !entry || entry->lo.fields_vapic.guest_mode)
3464 return 0;
3465
3466 valid = entry->lo.fields_vapic.valid;
3467
3468 entry->lo.val = 0;
3469 entry->hi.val = 0;
3470
3471 entry->lo.fields_vapic.valid = valid;
3472 entry->lo.fields_vapic.guest_mode = 1;
3473 entry->lo.fields_vapic.ga_log_intr = 1;
3474 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
3475 entry->hi.fields.vector = ir_data->ga_vector;
3476 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
3477
3478 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3479 ir_data->irq_2_irte.index, entry, ir_data);
3480 }
3481 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3482
3483 int amd_iommu_deactivate_guest_mode(void *data)
3484 {
3485 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3486 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3487 struct irq_cfg *cfg = ir_data->cfg;
3488 u64 valid;
3489
3490 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3491 !entry || !entry->lo.fields_vapic.guest_mode)
3492 return 0;
3493
3494 valid = entry->lo.fields_remap.valid;
3495
3496 entry->lo.val = 0;
3497 entry->hi.val = 0;
3498
3499 entry->lo.fields_remap.valid = valid;
3500 entry->lo.fields_remap.dm = apic->dest_mode_logical;
3501 entry->lo.fields_remap.int_type = apic->delivery_mode;
3502 entry->hi.fields.vector = cfg->vector;
3503 entry->lo.fields_remap.destination =
3504 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3505 entry->hi.fields.destination =
3506 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3507
3508 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3509 ir_data->irq_2_irte.index, entry, ir_data);
3510 }
3511 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3512
3513 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
3514 {
3515 int ret;
3516 struct amd_iommu_pi_data *pi_data = vcpu_info;
3517 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
3518 struct amd_ir_data *ir_data = data->chip_data;
3519 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3520 struct iommu_dev_data *dev_data;
3521
3522 if (ir_data->iommu == NULL)
3523 return -EINVAL;
3524
3525 dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
3526
3527
3528
3529
3530
3531 if (!dev_data || !dev_data->use_vapic)
3532 return 0;
3533
3534 ir_data->cfg = irqd_cfg(data);
3535 pi_data->ir_data = ir_data;
3536
3537
3538
3539
3540
3541 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
3542 pr_debug("%s: Fall back to using intr legacy remap\n",
3543 __func__);
3544 pi_data->is_guest_mode = false;
3545 }
3546
3547 pi_data->prev_ga_tag = ir_data->cached_ga_tag;
3548 if (pi_data->is_guest_mode) {
3549 ir_data->ga_root_ptr = (pi_data->base >> 12);
3550 ir_data->ga_vector = vcpu_pi_info->vector;
3551 ir_data->ga_tag = pi_data->ga_tag;
3552 ret = amd_iommu_activate_guest_mode(ir_data);
3553 if (!ret)
3554 ir_data->cached_ga_tag = pi_data->ga_tag;
3555 } else {
3556 ret = amd_iommu_deactivate_guest_mode(ir_data);
3557
3558
3559
3560
3561
3562 if (!ret)
3563 ir_data->cached_ga_tag = 0;
3564 }
3565
3566 return ret;
3567 }
3568
3569
3570 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3571 struct amd_ir_data *ir_data,
3572 struct irq_2_irte *irte_info,
3573 struct irq_cfg *cfg)
3574 {
3575
3576
3577
3578
3579
3580 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
3581 irte_info->index, cfg->vector,
3582 cfg->dest_apicid);
3583 }
3584
3585 static int amd_ir_set_affinity(struct irq_data *data,
3586 const struct cpumask *mask, bool force)
3587 {
3588 struct amd_ir_data *ir_data = data->chip_data;
3589 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3590 struct irq_cfg *cfg = irqd_cfg(data);
3591 struct irq_data *parent = data->parent_data;
3592 struct amd_iommu *iommu = ir_data->iommu;
3593 int ret;
3594
3595 if (!iommu)
3596 return -ENODEV;
3597
3598 ret = parent->chip->irq_set_affinity(parent, mask, force);
3599 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
3600 return ret;
3601
3602 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
3603
3604
3605
3606
3607
3608 send_cleanup_vector(cfg);
3609
3610 return IRQ_SET_MASK_OK_DONE;
3611 }
3612
3613 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
3614 {
3615 struct amd_ir_data *ir_data = irq_data->chip_data;
3616
3617 *msg = ir_data->msi_entry;
3618 }
3619
3620 static struct irq_chip amd_ir_chip = {
3621 .name = "AMD-IR",
3622 .irq_ack = apic_ack_irq,
3623 .irq_set_affinity = amd_ir_set_affinity,
3624 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
3625 .irq_compose_msi_msg = ir_compose_msi_msg,
3626 };
3627
3628 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3629 {
3630 struct fwnode_handle *fn;
3631
3632 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3633 if (!fn)
3634 return -ENOMEM;
3635 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
3636 if (!iommu->ir_domain) {
3637 irq_domain_free_fwnode(fn);
3638 return -ENOMEM;
3639 }
3640
3641 iommu->ir_domain->parent = arch_get_ir_parent_domain();
3642 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
3643 "AMD-IR-MSI",
3644 iommu->index);
3645 return 0;
3646 }
3647
3648 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
3649 {
3650 unsigned long flags;
3651 struct amd_iommu *iommu;
3652 struct irq_remap_table *table;
3653 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3654 int devid = ir_data->irq_2_irte.devid;
3655 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3656 struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
3657
3658 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
3659 !ref || !entry || !entry->lo.fields_vapic.guest_mode)
3660 return 0;
3661
3662 iommu = ir_data->iommu;
3663 if (!iommu)
3664 return -ENODEV;
3665
3666 table = get_irq_table(iommu, devid);
3667 if (!table)
3668 return -ENODEV;
3669
3670 raw_spin_lock_irqsave(&table->lock, flags);
3671
3672 if (ref->lo.fields_vapic.guest_mode) {
3673 if (cpu >= 0) {
3674 ref->lo.fields_vapic.destination =
3675 APICID_TO_IRTE_DEST_LO(cpu);
3676 ref->hi.fields.destination =
3677 APICID_TO_IRTE_DEST_HI(cpu);
3678 }
3679 ref->lo.fields_vapic.is_run = is_run;
3680 barrier();
3681 }
3682
3683 raw_spin_unlock_irqrestore(&table->lock, flags);
3684
3685 iommu_flush_irt(iommu, devid);
3686 iommu_completion_wait(iommu);
3687 return 0;
3688 }
3689 EXPORT_SYMBOL(amd_iommu_update_ga);
3690 #endif