/*
 * Early detection and parsing of DMA remapping hardware units reported
 * by the BIOS through the DMA Remapping Reporting (DMAR) ACPI table.
 * These routines are shared by the DMA-remapping and the
 * interrupt-remapping code.
 */
0017 #define pr_fmt(fmt) "DMAR: " fmt
0018
0019 #include <linux/pci.h>
0020 #include <linux/dmar.h>
0021 #include <linux/iova.h>
0022 #include <linux/timer.h>
0023 #include <linux/irq.h>
0024 #include <linux/interrupt.h>
0025 #include <linux/tboot.h>
0026 #include <linux/dmi.h>
0027 #include <linux/slab.h>
0028 #include <linux/iommu.h>
0029 #include <linux/numa.h>
0030 #include <linux/limits.h>
0031 #include <asm/irq_remapping.h>
0032
0033 #include "iommu.h"
0034 #include "../irq_remapping.h"
0035 #include "perf.h"
0036 #include "trace.h"
0037
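/*
 * Per-type dispatch table used while walking DMAR sub-tables: for each
 * ACPI DMAR structure type a handler and an opaque argument can be
 * registered, and dmar_walk_remapping_entries() invokes them in table
 * order.
 */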
0038 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
0039 struct dmar_res_callback {
0040 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
0041 void *arg[ACPI_DMAR_TYPE_RESERVED];
0042 bool ignore_unhandled;
0043 bool print_entry;
0044 };
0045
/*
 * All parsing of the DMAR ACPI table and all DMAR unit hot add/remove
 * operations are serialized by dmar_global_lock. The dmar_drhd_units
 * list is additionally published under RCU so that hot paths can walk
 * it with only rcu_read_lock() held.
 */
0058 DECLARE_RWSEM(dmar_global_lock);
0059 LIST_HEAD(dmar_drhd_units);
0060
0061 struct acpi_table_header * __initdata dmar_tbl;
0062 static int dmar_dev_scope_status = 1;
0063 static DEFINE_IDA(dmar_seq_ids);
0064
0065 static int alloc_iommu(struct dmar_drhd_unit *drhd);
0066 static void free_iommu(struct intel_iommu *iommu);
0067
0068 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
0069 {
/*
 * Add INCLUDE_ALL units at the tail so that a scan of the list finds
 * dedicated units before falling back to the catch-all one.
 */
0074 if (drhd->include_all)
0075 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
0076 else
0077 list_add_rcu(&drhd->list, &dmar_drhd_units);
0078 }
0079
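/*
 * Count the PCI/ACPI device scope entries between @start and @end and
 * allocate an array of struct dmar_dev_scope large enough to hold them.
 * Returns NULL when the scope list contains no devices.
 */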
0080 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
0081 {
0082 struct acpi_dmar_device_scope *scope;
0083
0084 *cnt = 0;
0085 while (start < end) {
0086 scope = start;
0087 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
0088 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
0089 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
0090 (*cnt)++;
0091 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
0092 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
0093 pr_warn("Unsupported device scope\n");
0094 }
0095 start += scope->length;
0096 }
0097 if (*cnt == 0)
0098 return NULL;
0099
0100 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
0101 }
0102
0103 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
0104 {
0105 int i;
0106 struct device *tmp_dev;
0107
0108 if (*devices && *cnt) {
0109 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
0110 put_device(tmp_dev);
0111 kfree(*devices);
0112 }
0113
0114 *devices = NULL;
0115 *cnt = 0;
0116 }
0117
/*
 * Scratch buffer used to avoid kzalloc()/kfree() for the common case of
 * a short PCI device path in dmar_alloc_pci_notify_info().
 */
static char dmar_pci_notify_info_buf[64];
0120
0121 static struct dmar_pci_notify_info *
0122 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
0123 {
0124 int level = 0;
0125 size_t size;
0126 struct pci_dev *tmp;
0127 struct dmar_pci_notify_info *info;
0128
0129 BUG_ON(dev->is_virtfn);

/*
 * Ignore devices whose PCI domain number cannot be represented in the
 * 16-bit segment field of a DMAR device scope.
 */
0135 if (pci_domain_nr(dev->bus) > U16_MAX)
0136 return NULL;
0137
/* Only generate the device path for device-add events */
0139 if (event == BUS_NOTIFY_ADD_DEVICE)
0140 for (tmp = dev; tmp; tmp = tmp->bus->self)
0141 level++;
0142
0143 size = struct_size(info, path, level);
0144 if (size <= sizeof(dmar_pci_notify_info_buf)) {
0145 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
0146 } else {
0147 info = kzalloc(size, GFP_KERNEL);
0148 if (!info) {
0149 if (dmar_dev_scope_status == 0)
0150 dmar_dev_scope_status = -ENOMEM;
0151 return NULL;
0152 }
0153 }
0154
0155 info->event = event;
0156 info->dev = dev;
0157 info->seg = pci_domain_nr(dev->bus);
0158 info->level = level;
0159 if (event == BUS_NOTIFY_ADD_DEVICE) {
0160 for (tmp = dev; tmp; tmp = tmp->bus->self) {
0161 level--;
0162 info->path[level].bus = tmp->bus->number;
0163 info->path[level].device = PCI_SLOT(tmp->devfn);
0164 info->path[level].function = PCI_FUNC(tmp->devfn);
0165 if (pci_is_root_bus(tmp->bus))
0166 info->bus = tmp->bus->number;
0167 }
0168 }
0169
0170 return info;
0171 }
0172
0173 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
0174 {
0175 if ((void *)info != dmar_pci_notify_info_buf)
0176 kfree(info);
0177 }
0178
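/*
 * Check whether the PCI path recorded in @info matches the ACPI device
 * scope path starting at @bus, falling back to a last-element-only
 * comparison to cope with broken firmware.
 */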
0179 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
0180 struct acpi_dmar_pci_path *path, int count)
0181 {
0182 int i;
0183
0184 if (info->bus != bus)
0185 goto fallback;
0186 if (info->level != count)
0187 goto fallback;
0188
0189 for (i = 0; i < count; i++) {
0190 if (path[i].device != info->path[i].device ||
0191 path[i].function != info->path[i].function)
0192 goto fallback;
0193 }
0194
0195 return true;
0196
0197 fallback:
/*
 * Fall back to matching only the final path element, to work around
 * firmware that describes the device relative to the wrong bus (see the
 * FW_BUG warning below).
 */
0199 if (count != 1)
0200 return false;
0201
0202 i = info->level - 1;
0203 if (bus == info->path[i].bus &&
0204 path[0].device == info->path[i].device &&
0205 path[0].function == info->path[i].function) {
0206 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
0207 bus, path[0].device, path[0].function);
0208 return true;
0209 }
0210
0211 return false;
0212 }
0213
0214
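/*
 * Try to match the PCI device described by @info against the device
 * scope entries between @start and @end for @segment. On a match the
 * device is recorded in the first free slot of @devices. Returns 1 if
 * the device was added, 0 if it does not belong to this scope, or a
 * negative errno on a firmware bug.
 */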
0215 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
void *start, void *end, u16 segment,
0217 struct dmar_dev_scope *devices,
0218 int devices_cnt)
0219 {
0220 int i, level;
0221 struct device *tmp, *dev = &info->dev->dev;
0222 struct acpi_dmar_device_scope *scope;
0223 struct acpi_dmar_pci_path *path;
0224
0225 if (segment != info->seg)
0226 return 0;
0227
0228 for (; start < end; start += scope->length) {
0229 scope = start;
0230 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
0231 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
0232 continue;
0233
0234 path = (struct acpi_dmar_pci_path *)(scope + 1);
0235 level = (scope->length - sizeof(*scope)) / sizeof(*path);
0236 if (!dmar_match_pci_path(info, scope->bus, path, level))
0237 continue;
0238

/*
 * An ENDPOINT scope must describe a device with a normal PCI header.
 * A BRIDGE scope may describe a device with a normal header only if its
 * class code identifies it as a bridge (e.g. PCIe NTB functions);
 * anything else is reported as a firmware bug.
 */
0248 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
0249 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
0250 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
0251 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
0252 info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
0253 pr_warn("Device scope type does not match for %s\n",
0254 pci_name(info->dev));
0255 return -EINVAL;
0256 }
0257
0258 for_each_dev_scope(devices, devices_cnt, i, tmp)
0259 if (tmp == NULL) {
0260 devices[i].bus = info->dev->bus->number;
0261 devices[i].devfn = info->dev->devfn;
0262 rcu_assign_pointer(devices[i].dev,
0263 get_device(dev));
0264 return 1;
0265 }
0266 BUG_ON(i >= devices_cnt);
0267 }
0268
0269 return 0;
0270 }
0271
0272 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
0273 struct dmar_dev_scope *devices, int count)
0274 {
0275 int index;
0276 struct device *tmp;
0277
0278 if (info->seg != segment)
0279 return 0;
0280
0281 for_each_active_dev_scope(devices, count, index, tmp)
0282 if (tmp == &info->dev->dev) {
0283 RCU_INIT_POINTER(devices[index].dev, NULL);
0284 synchronize_rcu();
0285 put_device(tmp);
0286 return 1;
0287 }
0288
0289 return 0;
0290 }
0291
0292 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
0293 {
0294 int ret = 0;
0295 struct dmar_drhd_unit *dmaru;
0296 struct acpi_dmar_hardware_unit *drhd;
0297
0298 for_each_drhd_unit(dmaru) {
0299 if (dmaru->include_all)
0300 continue;
0301
0302 drhd = container_of(dmaru->hdr,
0303 struct acpi_dmar_hardware_unit, header);
0304 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
0305 ((void *)drhd) + drhd->header.length,
0306 dmaru->segment,
0307 dmaru->devices, dmaru->devices_cnt);
0308 if (ret)
0309 break;
0310 }
0311 if (ret >= 0)
0312 ret = dmar_iommu_notify_scope_dev(info);
0313 if (ret < 0 && dmar_dev_scope_status == 0)
0314 dmar_dev_scope_status = ret;
0315
0316 if (ret >= 0)
0317 intel_irq_remap_add_device(info);
0318
0319 return ret;
0320 }
0321
0322 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
0323 {
0324 struct dmar_drhd_unit *dmaru;
0325
0326 for_each_drhd_unit(dmaru)
0327 if (dmar_remove_dev_scope(info, dmaru->segment,
0328 dmaru->devices, dmaru->devices_cnt))
0329 break;
0330 dmar_iommu_notify_scope_dev(info);
0331 }
0332
0333 static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
0334 {
0335 struct pci_dev *physfn = pci_physfn(pdev);
0336
0337 dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
0338 }
0339
0340 static int dmar_pci_bus_notifier(struct notifier_block *nb,
0341 unsigned long action, void *data)
0342 {
0343 struct pci_dev *pdev = to_pci_dev(data);
0344 struct dmar_pci_notify_info *info;
0345
/*
 * Only physical functions are listed in the DMAR device scopes;
 * lookups for virtual functions are done via their parent PF.
 */
0349 if (pdev->is_virtfn) {
/*
 * Make sure the VF uses the same MSI irq domain as its parent PF,
 * since the VF itself is not described by the DMAR table.
 */
0358 if (action == BUS_NOTIFY_ADD_DEVICE)
0359 vf_inherit_msi_domain(pdev);
0360 return NOTIFY_DONE;
0361 }
0362
0363 if (action != BUS_NOTIFY_ADD_DEVICE &&
0364 action != BUS_NOTIFY_REMOVED_DEVICE)
0365 return NOTIFY_DONE;
0366
0367 info = dmar_alloc_pci_notify_info(pdev, action);
0368 if (!info)
0369 return NOTIFY_DONE;
0370
0371 down_write(&dmar_global_lock);
0372 if (action == BUS_NOTIFY_ADD_DEVICE)
0373 dmar_pci_bus_add_dev(info);
0374 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
0375 dmar_pci_bus_del_dev(info);
0376 up_write(&dmar_global_lock);
0377
0378 dmar_free_pci_notify_info(info);
0379
0380 return NOTIFY_OK;
0381 }
0382
0383 static struct notifier_block dmar_pci_bus_nb = {
0384 .notifier_call = dmar_pci_bus_notifier,
0385 .priority = 1,
0386 };
0387
0388 static struct dmar_drhd_unit *
0389 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
0390 {
0391 struct dmar_drhd_unit *dmaru;
0392
0393 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
0394 dmar_rcu_check())
0395 if (dmaru->segment == drhd->segment &&
0396 dmaru->reg_base_addr == drhd->address)
0397 return dmaru;
0398
0399 return NULL;
0400 }
0401
/**
 * dmar_parse_one_drhd - parse one DMA Remapping Hardware Definition
 * @header: ACPI DMAR sub-table header for the DRHD structure
 * @arg: optional pointer to a DRHD counter, incremented on success
 *
 * Parse exactly one DRHD structure, which represents one DMA remapping
 * hardware unit present in the platform.
 */
0407 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
0408 {
0409 struct acpi_dmar_hardware_unit *drhd;
0410 struct dmar_drhd_unit *dmaru;
0411 int ret;
0412
0413 drhd = (struct acpi_dmar_hardware_unit *)header;
0414 dmaru = dmar_find_dmaru(drhd);
0415 if (dmaru)
0416 goto out;
0417
0418 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
0419 if (!dmaru)
0420 return -ENOMEM;
0421
/*
 * Keep a private copy of the DMAR structure: the buffer handed in
 * (e.g. from an ACPI _DSM evaluation during hotplug) may be freed
 * once parsing returns.
 */
0426 dmaru->hdr = (void *)(dmaru + 1);
0427 memcpy(dmaru->hdr, header, header->length);
0428 dmaru->reg_base_addr = drhd->address;
0429 dmaru->segment = drhd->segment;
0430 dmaru->include_all = drhd->flags & 0x1;
0431 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
0432 ((void *)drhd) + drhd->header.length,
0433 &dmaru->devices_cnt);
0434 if (dmaru->devices_cnt && dmaru->devices == NULL) {
0435 kfree(dmaru);
0436 return -ENOMEM;
0437 }
0438
0439 ret = alloc_iommu(dmaru);
0440 if (ret) {
0441 dmar_free_dev_scope(&dmaru->devices,
0442 &dmaru->devices_cnt);
0443 kfree(dmaru);
0444 return ret;
0445 }
0446 dmar_register_drhd_unit(dmaru);
0447
0448 out:
0449 if (arg)
0450 (*(int *)arg)++;
0451
0452 return 0;
0453 }
0454
0455 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
0456 {
0457 if (dmaru->devices && dmaru->devices_cnt)
0458 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
0459 if (dmaru->iommu)
0460 free_iommu(dmaru->iommu);
0461 kfree(dmaru);
0462 }
0463
0464 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
0465 void *arg)
0466 {
0467 struct acpi_dmar_andd *andd = (void *)header;
0468
/* Check for NUL termination within the designated length */
0470 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
0471 pr_warn(FW_BUG
0472 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
0473 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
0474 dmi_get_system_info(DMI_BIOS_VENDOR),
0475 dmi_get_system_info(DMI_BIOS_VERSION),
0476 dmi_get_system_info(DMI_PRODUCT_VERSION));
0477 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
0478 return -EINVAL;
0479 }
0480 pr_info("ANDD device: %x name: %s\n", andd->device_number,
0481 andd->device_name);
0482
0483 return 0;
0484 }
0485
0486 #ifdef CONFIG_ACPI_NUMA
0487 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
0488 {
0489 struct acpi_dmar_rhsa *rhsa;
0490 struct dmar_drhd_unit *drhd;
0491
0492 rhsa = (struct acpi_dmar_rhsa *)header;
0493 for_each_drhd_unit(drhd) {
0494 if (drhd->reg_base_addr == rhsa->base_address) {
0495 int node = pxm_to_node(rhsa->proximity_domain);
0496
0497 if (node != NUMA_NO_NODE && !node_online(node))
0498 node = NUMA_NO_NODE;
0499 drhd->iommu->node = node;
0500 return 0;
0501 }
0502 }
0503 pr_warn(FW_BUG
0504 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
0505 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
0506 rhsa->base_address,
0507 dmi_get_system_info(DMI_BIOS_VENDOR),
0508 dmi_get_system_info(DMI_BIOS_VERSION),
0509 dmi_get_system_info(DMI_PRODUCT_VERSION));
0510 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
0511
0512 return 0;
0513 }
0514 #else
0515 #define dmar_parse_one_rhsa dmar_res_noop
0516 #endif
0517
0518 static void
0519 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
0520 {
0521 struct acpi_dmar_hardware_unit *drhd;
0522 struct acpi_dmar_reserved_memory *rmrr;
0523 struct acpi_dmar_atsr *atsr;
0524 struct acpi_dmar_rhsa *rhsa;
0525 struct acpi_dmar_satc *satc;
0526
0527 switch (header->type) {
0528 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
0529 drhd = container_of(header, struct acpi_dmar_hardware_unit,
0530 header);
0531 pr_info("DRHD base: %#016Lx flags: %#x\n",
0532 (unsigned long long)drhd->address, drhd->flags);
0533 break;
0534 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
0535 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
0536 header);
0537 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
0538 (unsigned long long)rmrr->base_address,
0539 (unsigned long long)rmrr->end_address);
0540 break;
0541 case ACPI_DMAR_TYPE_ROOT_ATS:
0542 atsr = container_of(header, struct acpi_dmar_atsr, header);
0543 pr_info("ATSR flags: %#x\n", atsr->flags);
0544 break;
0545 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
0546 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
0547 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
0548 (unsigned long long)rhsa->base_address,
0549 rhsa->proximity_domain);
0550 break;
0551 case ACPI_DMAR_TYPE_NAMESPACE:
/*
 * Named ACPI devices are only printed from dmar_parse_one_andd(),
 * after the name has been checked for NUL termination.
 */
0554 break;
0555 case ACPI_DMAR_TYPE_SATC:
0556 satc = container_of(header, struct acpi_dmar_satc, header);
0557 pr_info("SATC flags: 0x%x\n", satc->flags);
0558 break;
0559 }
0560 }
0561
/*
 * dmar_table_detect - check that the platform provides a mappable DMAR
 * ACPI table.
 */
0565 static int __init dmar_table_detect(void)
0566 {
0567 acpi_status status = AE_OK;
0568
0569
0570 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
0571
0572 if (ACPI_SUCCESS(status) && !dmar_tbl) {
0573 pr_warn("Unable to map DMAR\n");
0574 status = AE_NOT_FOUND;
0575 }
0576
0577 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
0578 }
0579
0580 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
0581 size_t len, struct dmar_res_callback *cb)
0582 {
0583 struct acpi_dmar_header *iter, *next;
0584 struct acpi_dmar_header *end = ((void *)start) + len;
0585
0586 for (iter = start; iter < end; iter = next) {
0587 next = (void *)iter + iter->length;
0588 if (iter->length == 0) {
/* Avoid looping forever on bad ACPI tables */
0590 pr_debug(FW_BUG "Invalid 0-length structure\n");
0591 break;
0592 } else if (next > end) {
/* Avoid passing the table end */
0594 pr_warn(FW_BUG "Record passes table end\n");
0595 return -EINVAL;
0596 }
0597
0598 if (cb->print_entry)
0599 dmar_table_print_dmar_entry(iter);
0600
0601 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
0602
0603 pr_debug("Unknown DMAR structure type %d\n",
0604 iter->type);
0605 } else if (cb->cb[iter->type]) {
0606 int ret;
0607
0608 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
0609 if (ret)
0610 return ret;
0611 } else if (!cb->ignore_unhandled) {
0612 pr_warn("No handler for DMAR structure type %d\n",
0613 iter->type);
0614 return -EINVAL;
0615 }
0616 }
0617
0618 return 0;
0619 }
0620
0621 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
0622 struct dmar_res_callback *cb)
0623 {
0624 return dmar_walk_remapping_entries((void *)(dmar + 1),
0625 dmar->header.length - sizeof(*dmar), cb);
0626 }
0627
/*
 * parse_dmar_table - parse the DMA Remapping Reporting (DMAR) ACPI table
 */
0631 static int __init
0632 parse_dmar_table(void)
0633 {
0634 struct acpi_table_dmar *dmar;
0635 int drhd_count = 0;
0636 int ret;
0637 struct dmar_res_callback cb = {
0638 .print_entry = true,
0639 .ignore_unhandled = true,
0640 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
0641 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
0642 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
0643 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
0644 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
0645 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
0646 .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
0647 };
0648
/*
 * Map the DMAR table again: detect_intel_iommu() dropped its reference
 * once early detection was done.
 */
0653 dmar_table_detect();
0654
/*
 * On tboot-enabled systems use the DMAR copy saved in the TXT heap,
 * which, unlike the ACPI tables themselves, is DMA protected.
 */
0659 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
0660
0661 dmar = (struct acpi_table_dmar *)dmar_tbl;
0662 if (!dmar)
0663 return -ENODEV;
0664
0665 if (dmar->width < PAGE_SHIFT - 1) {
0666 pr_warn("Invalid DMAR haw\n");
0667 return -EINVAL;
0668 }
0669
0670 pr_info("Host address width %d\n", dmar->width + 1);
0671 ret = dmar_walk_dmar_table(dmar, &cb);
0672 if (ret == 0 && drhd_count == 0)
0673 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
0674
0675 return ret;
0676 }
0677
0678 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
0679 int cnt, struct pci_dev *dev)
0680 {
0681 int index;
0682 struct device *tmp;
0683
0684 while (dev) {
0685 for_each_active_dev_scope(devices, cnt, index, tmp)
0686 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
0687 return 1;
0688
/* Check our parent */
0690 dev = dev->bus->self;
0691 }
0692
0693 return 0;
0694 }
0695
0696 struct dmar_drhd_unit *
0697 dmar_find_matched_drhd_unit(struct pci_dev *dev)
0698 {
0699 struct dmar_drhd_unit *dmaru;
0700 struct acpi_dmar_hardware_unit *drhd;
0701
0702 dev = pci_physfn(dev);
0703
0704 rcu_read_lock();
0705 for_each_drhd_unit(dmaru) {
0706 drhd = container_of(dmaru->hdr,
0707 struct acpi_dmar_hardware_unit,
0708 header);
0709
0710 if (dmaru->include_all &&
0711 drhd->segment == pci_domain_nr(dev->bus))
0712 goto out;
0713
0714 if (dmar_pci_device_match(dmaru->devices,
0715 dmaru->devices_cnt, dev))
0716 goto out;
0717 }
0718 dmaru = NULL;
0719 out:
0720 rcu_read_unlock();
0721
0722 return dmaru;
0723 }
0724
0725 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
0726 struct acpi_device *adev)
0727 {
0728 struct dmar_drhd_unit *dmaru;
0729 struct acpi_dmar_hardware_unit *drhd;
0730 struct acpi_dmar_device_scope *scope;
0731 struct device *tmp;
0732 int i;
0733 struct acpi_dmar_pci_path *path;
0734
0735 for_each_drhd_unit(dmaru) {
0736 drhd = container_of(dmaru->hdr,
0737 struct acpi_dmar_hardware_unit,
0738 header);
0739
0740 for (scope = (void *)(drhd + 1);
0741 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
0742 scope = ((void *)scope) + scope->length) {
0743 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
0744 continue;
0745 if (scope->enumeration_id != device_number)
0746 continue;
0747
0748 path = (void *)(scope + 1);
0749 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
0750 dev_name(&adev->dev), dmaru->reg_base_addr,
0751 scope->bus, path->device, path->function);
0752 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
0753 if (tmp == NULL) {
0754 dmaru->devices[i].bus = scope->bus;
0755 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
0756 path->function);
0757 rcu_assign_pointer(dmaru->devices[i].dev,
0758 get_device(&adev->dev));
0759 return;
0760 }
0761 BUG_ON(i >= dmaru->devices_cnt);
0762 }
0763 }
0764 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
0765 device_number, dev_name(&adev->dev));
0766 }
0767
0768 static int __init dmar_acpi_dev_scope_init(void)
0769 {
0770 struct acpi_dmar_andd *andd;
0771
0772 if (dmar_tbl == NULL)
0773 return -ENODEV;
0774
0775 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
0776 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
0777 andd = ((void *)andd) + andd->header.length) {
0778 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
0779 acpi_handle h;
0780 struct acpi_device *adev;
0781
0782 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
0783 andd->device_name,
0784 &h))) {
0785 pr_err("Failed to find handle for ACPI object %s\n",
0786 andd->device_name);
0787 continue;
0788 }
0789 adev = acpi_fetch_acpi_dev(h);
0790 if (!adev) {
0791 pr_err("Failed to get device for ACPI object %s\n",
0792 andd->device_name);
0793 continue;
0794 }
0795 dmar_acpi_insert_dev_scope(andd->device_number, adev);
0796 }
0797 }
0798 return 0;
0799 }
0800
0801 int __init dmar_dev_scope_init(void)
0802 {
0803 struct pci_dev *dev = NULL;
0804 struct dmar_pci_notify_info *info;
0805
0806 if (dmar_dev_scope_status != 1)
0807 return dmar_dev_scope_status;
0808
0809 if (list_empty(&dmar_drhd_units)) {
0810 dmar_dev_scope_status = -ENODEV;
0811 } else {
0812 dmar_dev_scope_status = 0;
0813
0814 dmar_acpi_dev_scope_init();
0815
0816 for_each_pci_dev(dev) {
0817 if (dev->is_virtfn)
0818 continue;
0819
0820 info = dmar_alloc_pci_notify_info(dev,
0821 BUS_NOTIFY_ADD_DEVICE);
0822 if (!info) {
0823 return dmar_dev_scope_status;
0824 } else {
0825 dmar_pci_bus_add_dev(info);
0826 dmar_free_pci_notify_info(info);
0827 }
0828 }
0829 }
0830
0831 return dmar_dev_scope_status;
0832 }
0833
0834 void __init dmar_register_bus_notifier(void)
0835 {
0836 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
0837 }
0838
0839
0840 int __init dmar_table_init(void)
0841 {
0842 static int dmar_table_initialized;
0843 int ret;
0844
0845 if (dmar_table_initialized == 0) {
0846 ret = parse_dmar_table();
0847 if (ret < 0) {
0848 if (ret != -ENODEV)
0849 pr_info("Parse DMAR table failure.\n");
0850 } else if (list_empty(&dmar_drhd_units)) {
0851 pr_info("No DMAR devices found\n");
0852 ret = -ENODEV;
0853 }
0854
0855 if (ret < 0)
0856 dmar_table_initialized = ret;
0857 else
0858 dmar_table_initialized = 1;
0859 }
0860
0861 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
0862 }
0863
0864 static void warn_invalid_dmar(u64 addr, const char *message)
0865 {
0866 pr_warn_once(FW_BUG
0867 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
0868 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
0869 addr, message,
0870 dmi_get_system_info(DMI_BIOS_VENDOR),
0871 dmi_get_system_info(DMI_BIOS_VERSION),
0872 dmi_get_system_info(DMI_PRODUCT_VERSION));
0873 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
0874 }
0875
0876 static int __ref
0877 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
0878 {
0879 struct acpi_dmar_hardware_unit *drhd;
0880 void __iomem *addr;
0881 u64 cap, ecap;
0882
0883 drhd = (void *)entry;
0884 if (!drhd->address) {
0885 warn_invalid_dmar(0, "");
0886 return -EINVAL;
0887 }
0888
0889 if (arg)
0890 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
0891 else
0892 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
0893 if (!addr) {
0894 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
0895 return -EINVAL;
0896 }
0897
0898 cap = dmar_readq(addr + DMAR_CAP_REG);
0899 ecap = dmar_readq(addr + DMAR_ECAP_REG);
0900
0901 if (arg)
0902 iounmap(addr);
0903 else
0904 early_iounmap(addr, VTD_PAGE_SIZE);
0905
0906 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
0907 warn_invalid_dmar(drhd->address, " returns all ones");
0908 return -EINVAL;
0909 }
0910
0911 return 0;
0912 }
0913
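/*
 * Early detection of VT-d: validate the DRHD register ranges reported in
 * the DMAR table and, if usable, hook the x86 IOMMU init/shutdown
 * callbacks. The early table reference is dropped again here and is
 * re-acquired later by parse_dmar_table().
 */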
0914 void __init detect_intel_iommu(void)
0915 {
0916 int ret;
0917 struct dmar_res_callback validate_drhd_cb = {
0918 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
0919 .ignore_unhandled = true,
0920 };
0921
0922 down_write(&dmar_global_lock);
0923 ret = dmar_table_detect();
0924 if (!ret)
0925 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
0926 &validate_drhd_cb);
0927 if (!ret && !no_iommu && !iommu_detected &&
0928 (!dmar_disabled || dmar_platform_optin())) {
0929 iommu_detected = 1;
0930
0931 pci_request_acs();
0932 }
0933
0934 #ifdef CONFIG_X86
0935 if (!ret) {
0936 x86_init.iommu.iommu_init = intel_iommu_init;
0937 x86_platform.iommu_shutdown = intel_iommu_shutdown;
0938 }
0939
0940 #endif
0941
0942 if (dmar_tbl) {
0943 acpi_put_table(dmar_tbl);
0944 dmar_tbl = NULL;
0945 }
0946 up_write(&dmar_global_lock);
0947 }
0948
0949 static void unmap_iommu(struct intel_iommu *iommu)
0950 {
0951 iounmap(iommu->reg);
0952 release_mem_region(iommu->reg_phys, iommu->reg_size);
0953 }
0954
/**
 * map_iommu - memory-map an IOMMU's register set
 * @iommu: the IOMMU to map
 * @phys_addr: physical base address of the register set
 *
 * Start with a single page mapping and grow it if the capability
 * registers report a larger register set.
 */
0963 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
0964 {
int map_size, err = 0;
0966
0967 iommu->reg_phys = phys_addr;
0968 iommu->reg_size = VTD_PAGE_SIZE;
0969
0970 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
0971 pr_err("Can't reserve memory\n");
0972 err = -EBUSY;
0973 goto out;
0974 }
0975
0976 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
0977 if (!iommu->reg) {
0978 pr_err("Can't map the region\n");
0979 err = -ENOMEM;
0980 goto release;
0981 }
0982
0983 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
0984 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
0985
0986 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
0987 err = -EINVAL;
0988 warn_invalid_dmar(phys_addr, " returns all ones");
0989 goto unmap;
0990 }
0991 if (ecap_vcs(iommu->ecap))
0992 iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
0993
/* The register set might span more than one page */
0995 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
0996 cap_max_fault_reg_offset(iommu->cap));
0997 map_size = VTD_PAGE_ALIGN(map_size);
0998 if (map_size > iommu->reg_size) {
0999 iounmap(iommu->reg);
1000 release_mem_region(iommu->reg_phys, iommu->reg_size);
1001 iommu->reg_size = map_size;
1002 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1003 iommu->name)) {
1004 pr_err("Can't reserve memory\n");
1005 err = -EBUSY;
1006 goto out;
1007 }
1008 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1009 if (!iommu->reg) {
1010 pr_err("Can't map the region\n");
1011 err = -ENOMEM;
1012 goto release;
1013 }
1014 }
1015 err = 0;
1016 goto out;
1017
1018 unmap:
1019 iounmap(iommu->reg);
1020 release:
1021 release_mem_region(iommu->reg_phys, iommu->reg_size);
1022 out:
1023 return err;
1024 }
1025
1026 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1027 {
1028 struct intel_iommu *iommu;
1029 u32 ver, sts;
1030 int agaw = -1;
1031 int msagaw = -1;
1032 int err;
1033
1034 if (!drhd->reg_base_addr) {
1035 warn_invalid_dmar(0, "");
1036 return -EINVAL;
1037 }
1038
1039 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1040 if (!iommu)
1041 return -ENOMEM;
1042
1043 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1044 DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1045 if (iommu->seq_id < 0) {
1046 pr_err("Failed to allocate seq_id\n");
1047 err = iommu->seq_id;
1048 goto error;
1049 }
1050 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1051
1052 err = map_iommu(iommu, drhd->reg_base_addr);
1053 if (err) {
1054 pr_err("Failed to map %s\n", iommu->name);
1055 goto error_free_seq_id;
1056 }
1057
1058 err = -EINVAL;
1059 if (cap_sagaw(iommu->cap) == 0) {
1060 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1061 iommu->name);
1062 drhd->ignored = 1;
1063 }
1064
1065 if (!drhd->ignored) {
1066 agaw = iommu_calculate_agaw(iommu);
1067 if (agaw < 0) {
1068 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1069 iommu->seq_id);
1070 drhd->ignored = 1;
1071 }
1072 }
1073 if (!drhd->ignored) {
1074 msagaw = iommu_calculate_max_sagaw(iommu);
1075 if (msagaw < 0) {
1076 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1077 iommu->seq_id);
1078 drhd->ignored = 1;
1079 agaw = -1;
1080 }
1081 }
1082 iommu->agaw = agaw;
1083 iommu->msagaw = msagaw;
1084 iommu->segment = drhd->segment;
1085
1086 iommu->node = NUMA_NO_NODE;
1087
1088 ver = readl(iommu->reg + DMAR_VER_REG);
1089 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1090 iommu->name,
1091 (unsigned long long)drhd->reg_base_addr,
1092 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1093 (unsigned long long)iommu->cap,
1094 (unsigned long long)iommu->ecap);
1095
/* Reflect the current hardware status in iommu->gcmd */
1097 sts = readl(iommu->reg + DMAR_GSTS_REG);
1098 if (sts & DMA_GSTS_IRES)
1099 iommu->gcmd |= DMA_GCMD_IRE;
1100 if (sts & DMA_GSTS_TES)
1101 iommu->gcmd |= DMA_GCMD_TE;
1102 if (sts & DMA_GSTS_QIES)
1103 iommu->gcmd |= DMA_GCMD_QIE;
1104
1105 raw_spin_lock_init(&iommu->register_lock);
1106
/*
 * Registering with the iommu core here is only needed for hotplug:
 * at boot intel_iommu_enabled is not yet set, and intel_iommu_init()
 * takes care of registering the units found at boot time.
 */
1112 if (intel_iommu_enabled && !drhd->ignored) {
1113 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1114 intel_iommu_groups,
1115 "%s", iommu->name);
1116 if (err)
1117 goto err_unmap;
1118
1119 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1120 if (err)
1121 goto err_sysfs;
1122 }
1123
1124 drhd->iommu = iommu;
1125 iommu->drhd = drhd;
1126
1127 return 0;
1128
1129 err_sysfs:
1130 iommu_device_sysfs_remove(&iommu->iommu);
1131 err_unmap:
1132 unmap_iommu(iommu);
1133 error_free_seq_id:
1134 ida_free(&dmar_seq_ids, iommu->seq_id);
1135 error:
1136 kfree(iommu);
1137 return err;
1138 }
1139
1140 static void free_iommu(struct intel_iommu *iommu)
1141 {
1142 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1143 iommu_device_unregister(&iommu->iommu);
1144 iommu_device_sysfs_remove(&iommu->iommu);
1145 }
1146
1147 if (iommu->irq) {
1148 if (iommu->pr_irq) {
1149 free_irq(iommu->pr_irq, iommu);
1150 dmar_free_hwirq(iommu->pr_irq);
1151 iommu->pr_irq = 0;
1152 }
1153 free_irq(iommu->irq, iommu);
1154 dmar_free_hwirq(iommu->irq);
1155 iommu->irq = 0;
1156 }
1157
1158 if (iommu->qi) {
1159 free_page((unsigned long)iommu->qi->desc);
1160 kfree(iommu->qi->desc_status);
1161 kfree(iommu->qi);
1162 }
1163
1164 if (iommu->reg)
1165 unmap_iommu(iommu);
1166
1167 ida_free(&dmar_seq_ids, iommu->seq_id);
1168 kfree(iommu);
1169 }
1170
/*
 * Reclaim submitted descriptors that have completed, returning their
 * slots to the free pool.
 */
1174 static inline void reclaim_free_desc(struct q_inval *qi)
1175 {
1176 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1177 qi->desc_status[qi->free_tail] == QI_ABORT) {
1178 qi->desc_status[qi->free_tail] = QI_FREE;
1179 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1180 qi->free_cnt++;
1181 }
1182 }
1183
1184 static const char *qi_type_string(u8 type)
1185 {
1186 switch (type) {
1187 case QI_CC_TYPE:
1188 return "Context-cache Invalidation";
1189 case QI_IOTLB_TYPE:
1190 return "IOTLB Invalidation";
1191 case QI_DIOTLB_TYPE:
1192 return "Device-TLB Invalidation";
1193 case QI_IEC_TYPE:
1194 return "Interrupt Entry Cache Invalidation";
1195 case QI_IWD_TYPE:
1196 return "Invalidation Wait";
1197 case QI_EIOTLB_TYPE:
1198 return "PASID-based IOTLB Invalidation";
1199 case QI_PC_TYPE:
1200 return "PASID-cache Invalidation";
1201 case QI_DEIOTLB_TYPE:
1202 return "PASID-based Device-TLB Invalidation";
1203 case QI_PGRP_RESP_TYPE:
1204 return "Page Group Response";
1205 default:
1206 return "UNKNOWN";
1207 }
1208 }
1209
1210 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1211 {
1212 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1213 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1214 struct qi_desc *desc = iommu->qi->desc + head;
1215
1216 if (fault & DMA_FSTS_IQE)
1217 pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
1218 DMAR_IQER_REG_IQEI(iqe_err));
1219 if (fault & DMA_FSTS_ITE)
1220 pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
1221 DMAR_IQER_REG_ITESID(iqe_err));
1222 if (fault & DMA_FSTS_ICE)
1223 pr_err("VT-d detected Invalidation Completion Error: SID %llx",
1224 DMAR_IQER_REG_ICESID(iqe_err));
1225
1226 pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1227 qi_type_string(desc->qw0 & 0xf),
1228 (unsigned long long)desc->qw0,
1229 (unsigned long long)desc->qw1);
1230
1231 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1232 head <<= qi_shift(iommu);
1233 desc = iommu->qi->desc + head;
1234
1235 pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1236 qi_type_string(desc->qw0 & 0xf),
1237 (unsigned long long)desc->qw0,
1238 (unsigned long long)desc->qw1);
1239 }
1240
1241 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1242 {
1243 u32 fault;
1244 int head, tail;
1245 struct q_inval *qi = iommu->qi;
1246 int shift = qi_shift(iommu);
1247
1248 if (qi->desc_status[wait_index] == QI_ABORT)
1249 return -EAGAIN;
1250
1251 fault = readl(iommu->reg + DMAR_FSTS_REG);
1252 if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
1253 qi_dump_fault(iommu, fault);
1254
/*
 * If an IQE happens, the head points to the descriptor associated
 * with the error. No new descriptors are fetched until the IQE is
 * cleared.
 */
1260 if (fault & DMA_FSTS_IQE) {
1261 head = readl(iommu->reg + DMAR_IQH_REG);
1262 if ((head >> shift) == index) {
1263 struct qi_desc *desc = qi->desc + head;
1264
/*
 * Overwrite the offending descriptor with the wait descriptor so
 * the queue can make forward progress once the IQE is cleared.
 */
1270 memcpy(desc, qi->desc + (wait_index << shift),
1271 1 << shift);
1272 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1273 pr_info("Invalidation Queue Error (IQE) cleared\n");
1274 return -EINVAL;
1275 }
1276 }
1277
/*
 * If an ITE happens, all pending wait descriptors are aborted.
 * No new descriptors are fetched until the ITE is cleared.
 */
1282 if (fault & DMA_FSTS_ITE) {
1283 head = readl(iommu->reg + DMAR_IQH_REG);
1284 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1285 head |= 1;
1286 tail = readl(iommu->reg + DMAR_IQT_REG);
1287 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1288
1289 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1290 pr_info("Invalidation Time-out Error (ITE) cleared\n");
1291
1292 do {
1293 if (qi->desc_status[head] == QI_IN_USE)
1294 qi->desc_status[head] = QI_ABORT;
1295 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1296 } while (head != tail);
1297
1298 if (qi->desc_status[wait_index] == QI_ABORT)
1299 return -EAGAIN;
1300 }
1301
1302 if (fault & DMA_FSTS_ICE) {
1303 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1304 pr_info("Invalidation Completion Error (ICE) cleared\n");
1305 }
1306
1307 return 0;
1308 }
1309
/*
 * Submit @count invalidation descriptors of any type to the queued
 * invalidation interface and wait for hardware to complete them. A wait
 * descriptor is appended to each submission and polled for completion
 * before returning.
 */
1317 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1318 unsigned int count, unsigned long options)
1319 {
1320 struct q_inval *qi = iommu->qi;
1321 s64 devtlb_start_ktime = 0;
1322 s64 iotlb_start_ktime = 0;
1323 s64 iec_start_ktime = 0;
1324 struct qi_desc wait_desc;
1325 int wait_index, index;
1326 unsigned long flags;
1327 int offset, shift;
1328 int rc, i;
1329 u64 type;
1330
1331 if (!qi)
1332 return 0;
1333
1334 type = desc->qw0 & GENMASK_ULL(3, 0);
1335
1336 if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
1337 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1338 iotlb_start_ktime = ktime_to_ns(ktime_get());
1339
1340 if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
1341 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1342 devtlb_start_ktime = ktime_to_ns(ktime_get());
1343
1344 if (type == QI_IEC_TYPE &&
1345 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1346 iec_start_ktime = ktime_to_ns(ktime_get());
1347
1348 restart:
1349 rc = 0;
1350
1351 raw_spin_lock_irqsave(&qi->q_lock, flags);
1352
/*
 * Wait until there is room for all descriptors plus the trailing wait
 * descriptor and the mandatory empty slot between head and tail.
 */
1357 while (qi->free_cnt < count + 2) {
1358 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1359 cpu_relax();
1360 raw_spin_lock_irqsave(&qi->q_lock, flags);
1361 }
1362
1363 index = qi->free_head;
1364 wait_index = (index + count) % QI_LENGTH;
1365 shift = qi_shift(iommu);
1366
1367 for (i = 0; i < count; i++) {
1368 offset = ((index + i) % QI_LENGTH) << shift;
1369 memcpy(qi->desc + offset, &desc[i], 1 << shift);
1370 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1371 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1372 desc[i].qw2, desc[i].qw3);
1373 }
1374 qi->desc_status[wait_index] = QI_IN_USE;
1375
1376 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1377 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1378 if (options & QI_OPT_WAIT_DRAIN)
1379 wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1380 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1381 wait_desc.qw2 = 0;
1382 wait_desc.qw3 = 0;
1383
1384 offset = wait_index << shift;
1385 memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1386
1387 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1388 qi->free_cnt -= count + 1;
1389
/*
 * Update the hardware tail register to tell the hardware about the
 * newly queued descriptors.
 */
1394 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1395
1396 while (qi->desc_status[wait_index] != QI_DONE) {
/*
 * Interrupts remain disabled while we poll so that interrupt context
 * on this CPU cannot queue another command and then spin forever
 * waiting for free slots.
 */
1404 rc = qi_check_fault(iommu, index, wait_index);
1405 if (rc)
1406 break;
1407
1408 raw_spin_unlock(&qi->q_lock);
1409 cpu_relax();
1410 raw_spin_lock(&qi->q_lock);
1411 }
1412
1413 for (i = 0; i < count; i++)
1414 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
1415
1416 reclaim_free_desc(qi);
1417 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1418
1419 if (rc == -EAGAIN)
1420 goto restart;
1421
1422 if (iotlb_start_ktime)
1423 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1424 ktime_to_ns(ktime_get()) - iotlb_start_ktime);
1425
1426 if (devtlb_start_ktime)
1427 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1428 ktime_to_ns(ktime_get()) - devtlb_start_ktime);
1429
1430 if (iec_start_ktime)
1431 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1432 ktime_to_ns(ktime_get()) - iec_start_ktime);
1433
1434 return rc;
1435 }
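/*
 * Illustrative sketch of batching (not an in-tree caller; 'did' is a
 * hypothetical domain id and the granularities mirror what
 * qi_flush_context()/qi_flush_iotlb() below would encode): several
 * descriptors may be queued with a single trailing wait descriptor.
 *
 *	struct qi_desc desc[2] = {};
 *
 *	desc[0].qw0 = QI_CC_DID(did) | QI_CC_GRAN(DMA_CCMD_DOMAIN_INVL) |
 *		      QI_CC_TYPE;
 *	desc[1].qw0 = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
 *		      QI_IOTLB_TYPE;
 *	qi_submit_sync(iommu, desc, 2, 0);
 */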
1436
/*
 * Flush the global interrupt entry cache.
 */
1440 void qi_global_iec(struct intel_iommu *iommu)
1441 {
1442 struct qi_desc desc;
1443
1444 desc.qw0 = QI_IEC_TYPE;
1445 desc.qw1 = 0;
1446 desc.qw2 = 0;
1447 desc.qw3 = 0;
1448
1449
1450 qi_submit_sync(iommu, &desc, 1, 0);
1451 }
1452
1453 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1454 u64 type)
1455 {
1456 struct qi_desc desc;
1457
1458 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1459 | QI_CC_GRAN(type) | QI_CC_TYPE;
1460 desc.qw1 = 0;
1461 desc.qw2 = 0;
1462 desc.qw3 = 0;
1463
1464 qi_submit_sync(iommu, &desc, 1, 0);
1465 }
1466
1467 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1468 unsigned int size_order, u64 type)
1469 {
1470 u8 dw = 0, dr = 0;
1471
1472 struct qi_desc desc;
1473 int ih = 0;
1474
1475 if (cap_write_drain(iommu->cap))
1476 dw = 1;
1477
1478 if (cap_read_drain(iommu->cap))
1479 dr = 1;
1480
1481 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1482 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1483 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1484 | QI_IOTLB_AM(size_order);
1485 desc.qw2 = 0;
1486 desc.qw3 = 0;
1487
1488 qi_submit_sync(iommu, &desc, 1, 0);
1489 }
1490
1491 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1492 u16 qdep, u64 addr, unsigned mask)
1493 {
1494 struct qi_desc desc;
1495
1496 if (mask) {
1497 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1498 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1499 } else
1500 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1501
1502 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1503 qdep = 0;
1504
1505 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1506 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1507 desc.qw2 = 0;
1508 desc.qw3 = 0;
1509
1510 qi_submit_sync(iommu, &desc, 1, 0);
1511 }
1512
/* PASID-based IOTLB invalidation */
1514 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1515 unsigned long npages, bool ih)
1516 {
1517 struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1518
/*
 * npages == -1 means a PASID-selective invalidation, otherwise a
 * positive value means page-selective-within-PASID invalidation.
 * npages == 0 is not a valid input.
 */
1524 if (WARN_ON(!npages)) {
1525 pr_err("Invalid input npages = %ld\n", npages);
1526 return;
1527 }
1528
1529 if (npages == -1) {
1530 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1531 QI_EIOTLB_DID(did) |
1532 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1533 QI_EIOTLB_TYPE;
1534 desc.qw1 = 0;
1535 } else {
1536 int mask = ilog2(__roundup_pow_of_two(npages));
1537 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1538
1539 if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1540 addr = ALIGN_DOWN(addr, align);
1541
1542 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1543 QI_EIOTLB_DID(did) |
1544 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1545 QI_EIOTLB_TYPE;
1546 desc.qw1 = QI_EIOTLB_ADDR(addr) |
1547 QI_EIOTLB_IH(ih) |
1548 QI_EIOTLB_AM(mask);
1549 }
1550
1551 qi_submit_sync(iommu, &desc, 1, 0);
1552 }
1553
/* PASID-based device IOTLB invalidation */
1555 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1556 u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
1557 {
1558 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1559 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1560
1561 desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1562 QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1563 QI_DEV_IOTLB_PFSID(pfsid);
1564
/*
 * Per the VT-d Device-TLB invalidate descriptor format: if S = 0 a
 * single 4 KiB page is invalidated; if S = 1 the position of the least
 * significant 0 bit in the address field encodes the invalidation
 * size, so the address must be aligned to the requested range.
 */
1574 if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1575 pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1576 addr, size_order);
1577
1578
1579 desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1580
if (size_order) {
	/*
	 * Existing 0s in the address below size_order may be the least
	 * significant bit; force them to 1s so the encoded range is
	 * never smaller than the one requested.
	 */
	desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
				VTD_PAGE_SHIFT);
	/* Clear the size_order bit to encode the invalidation size */
	desc.qw1 &= ~mask;
	/* Set the S bit to indicate flushing more than one page */
	desc.qw1 |= QI_DEV_EIOTLB_SIZE;
}
1594
1595 qi_submit_sync(iommu, &desc, 1, 0);
1596 }
1597
1598 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1599 u64 granu, u32 pasid)
1600 {
1601 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1602
1603 desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1604 QI_PC_GRAN(granu) | QI_PC_TYPE;
1605 qi_submit_sync(iommu, &desc, 1, 0);
1606 }
1607
/*
 * Disable the Queued Invalidation interface.
 */
1611 void dmar_disable_qi(struct intel_iommu *iommu)
1612 {
1613 unsigned long flags;
1614 u32 sts;
1615 cycles_t start_time = get_cycles();
1616
1617 if (!ecap_qis(iommu->ecap))
1618 return;
1619
1620 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1621
1622 sts = readl(iommu->reg + DMAR_GSTS_REG);
1623 if (!(sts & DMA_GSTS_QIES))
1624 goto end;
1625
/*
 * Give hardware a chance to complete the pending invalidation requests
 * before disabling the interface.
 */
1629 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1630 readl(iommu->reg + DMAR_IQH_REG)) &&
1631 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1632 cpu_relax();
1633
1634 iommu->gcmd &= ~DMA_GCMD_QIE;
1635 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1636
1637 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1638 !(sts & DMA_GSTS_QIES), sts);
1639 end:
1640 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1641 }
1642
/*
 * Enable queued invalidation.
 */
1646 static void __dmar_enable_qi(struct intel_iommu *iommu)
1647 {
1648 u32 sts;
1649 unsigned long flags;
1650 struct q_inval *qi = iommu->qi;
1651 u64 val = virt_to_phys(qi->desc);
1652
1653 qi->free_head = qi->free_tail = 0;
1654 qi->free_cnt = QI_LENGTH;
1655
/*
 * When the Scalable Mode capability is present, set DW=1 (256-bit
 * descriptors) and QS=1 (two-page queue) in IQA_REG.
 */
1660 if (ecap_smts(iommu->ecap))
1661 val |= (1 << 11) | 1;
1662
1663 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1664
1665
1666 writel(0, iommu->reg + DMAR_IQT_REG);
1667
1668 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1669
1670 iommu->gcmd |= DMA_GCMD_QIE;
1671 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1672
1673
1674 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1675
1676 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1677 }
1678
/*
 * Enable the Queued Invalidation interface. This is required for
 * interrupt remapping and is also used by DMA remapping in place of
 * register-based IOTLB invalidation.
 */
1684 int dmar_enable_qi(struct intel_iommu *iommu)
1685 {
1686 struct q_inval *qi;
1687 struct page *desc_page;
1688
1689 if (!ecap_qis(iommu->ecap))
1690 return -ENOENT;
1691
/* Queued invalidation is already set up and enabled */
1695 if (iommu->qi)
1696 return 0;
1697
1698 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1699 if (!iommu->qi)
1700 return -ENOMEM;
1701
1702 qi = iommu->qi;
1703
/*
 * Need two pages to accommodate 256 descriptors of 256 bits each if
 * the remapping hardware supports scalable mode translation.
 */
1708 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1709 !!ecap_smts(iommu->ecap));
1710 if (!desc_page) {
1711 kfree(qi);
1712 iommu->qi = NULL;
1713 return -ENOMEM;
1714 }
1715
1716 qi->desc = page_address(desc_page);
1717
1718 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1719 if (!qi->desc_status) {
1720 free_page((unsigned long) qi->desc);
1721 kfree(qi);
1722 iommu->qi = NULL;
1723 return -ENOMEM;
1724 }
1725
1726 raw_spin_lock_init(&qi->q_lock);
1727
1728 __dmar_enable_qi(iommu);
1729
1730 return 0;
1731 }
1732
/* IOMMU fault-event (MSI-like) interrupt handling */
1735 enum faulttype {
1736 DMA_REMAP,
1737 INTR_REMAP,
1738 UNKNOWN,
1739 };
1740
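/* Fault reason strings, indexed by the reason code reported by hardware
 * (see dmar_get_fault_reason()).
 */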
1741 static const char *dma_remap_fault_reasons[] =
1742 {
1743 "Software",
1744 "Present bit in root entry is clear",
1745 "Present bit in context entry is clear",
1746 "Invalid context entry",
1747 "Access beyond MGAW",
1748 "PTE Write access is not set",
1749 "PTE Read access is not set",
1750 "Next page table ptr is invalid",
1751 "Root table address invalid",
1752 "Context table ptr is invalid",
1753 "non-zero reserved fields in RTP",
1754 "non-zero reserved fields in CTP",
1755 "non-zero reserved fields in PTE",
1756 "PCE for translation request specifies blocking",
1757 };
1758
1759 static const char * const dma_remap_sm_fault_reasons[] = {
1760 "SM: Invalid Root Table Address",
1761 "SM: TTM 0 for request with PASID",
1762 "SM: TTM 0 for page group request",
1763 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1764 "SM: Error attempting to access Root Entry",
1765 "SM: Present bit in Root Entry is clear",
1766 "SM: Non-zero reserved field set in Root Entry",
1767 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1768 "SM: Error attempting to access Context Entry",
1769 "SM: Present bit in Context Entry is clear",
1770 "SM: Non-zero reserved field set in the Context Entry",
1771 "SM: Invalid Context Entry",
1772 "SM: DTE field in Context Entry is clear",
1773 "SM: PASID Enable field in Context Entry is clear",
1774 "SM: PASID is larger than the max in Context Entry",
1775 "SM: PRE field in Context-Entry is clear",
1776 "SM: RID_PASID field error in Context-Entry",
1777 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1778 "SM: Error attempting to access the PASID Directory Entry",
1779 "SM: Present bit in Directory Entry is clear",
1780 "SM: Non-zero reserved field set in PASID Directory Entry",
1781 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1782 "SM: Error attempting to access PASID Table Entry",
1783 "SM: Present bit in PASID Table Entry is clear",
1784 "SM: Non-zero reserved field set in PASID Table Entry",
1785 "SM: Invalid Scalable-Mode PASID Table Entry",
1786 "SM: ERE field is clear in PASID Table Entry",
1787 "SM: SRE field is clear in PASID Table Entry",
1788 "Unknown", "Unknown",
1789 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1790 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1791 "SM: Error attempting to access first-level paging entry",
1792 "SM: Present bit in first-level paging entry is clear",
1793 "SM: Non-zero reserved field set in first-level paging entry",
1794 "SM: Error attempting to access FL-PML4 entry",
1795 "SM: First-level entry address beyond MGAW in Nested translation",
1796 "SM: Read permission error in FL-PML4 entry in Nested translation",
1797 "SM: Read permission error in first-level paging entry in Nested translation",
1798 "SM: Write permission error in first-level paging entry in Nested translation",
1799 "SM: Error attempting to access second-level paging entry",
1800 "SM: Read/Write permission error in second-level paging entry",
1801 "SM: Non-zero reserved field set in second-level paging entry",
1802 "SM: Invalid second-level page table pointer",
1803 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1804 "Unknown", "Unknown", "Unknown",
1805 "SM: Address in first-level translation is not canonical",
1806 "SM: U/S set 0 for first-level translation with user privilege",
1807 "SM: No execute permission for request with PASID and ER=1",
1808 "SM: Address beyond the DMA hardware max",
1809 "SM: Second-level entry address beyond the max",
1810 "SM: No write permission for Write/AtomicOp request",
1811 "SM: No read permission for Read/AtomicOp request",
1812 "SM: Invalid address-interrupt address",
1813 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
1814 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1815 };
1816
1817 static const char *irq_remap_fault_reasons[] =
1818 {
1819 "Detected reserved fields in the decoded interrupt-remapped request",
1820 "Interrupt index exceeded the interrupt-remapping table size",
1821 "Present field in the IRTE entry is clear",
1822 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1823 "Detected reserved fields in the IRTE entry",
1824 "Blocked a compatibility format interrupt request",
1825 "Blocked an interrupt request due to source-id verification failure",
1826 };
1827
1828 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1829 {
1830 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1831 ARRAY_SIZE(irq_remap_fault_reasons))) {
1832 *fault_type = INTR_REMAP;
1833 return irq_remap_fault_reasons[fault_reason - 0x20];
1834 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1835 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1836 *fault_type = DMA_REMAP;
1837 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1838 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1839 *fault_type = DMA_REMAP;
1840 return dma_remap_fault_reasons[fault_reason];
1841 } else {
1842 *fault_type = UNKNOWN;
1843 return "Unknown";
1844 }
1845 }
1846
1847
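/*
 * Select the fault-event (FECTL) or page-request (PECTL) MSI register
 * block depending on which IRQ is being programmed.
 */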
1848 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1849 {
1850 if (iommu->irq == irq)
1851 return DMAR_FECTL_REG;
1852 else if (iommu->pr_irq == irq)
1853 return DMAR_PECTL_REG;
1854 else
1855 BUG();
1856 }
1857
1858 void dmar_msi_unmask(struct irq_data *data)
1859 {
1860 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1861 int reg = dmar_msi_reg(iommu, data->irq);
1862 unsigned long flag;
1863
1864
1865 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1866 writel(0, iommu->reg + reg);
1867
1868 readl(iommu->reg + reg);
1869 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1870 }
1871
1872 void dmar_msi_mask(struct irq_data *data)
1873 {
1874 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1875 int reg = dmar_msi_reg(iommu, data->irq);
1876 unsigned long flag;
1877
1878
1879 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1880 writel(DMA_FECTL_IM, iommu->reg + reg);
1881
1882 readl(iommu->reg + reg);
1883 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1884 }
1885
1886 void dmar_msi_write(int irq, struct msi_msg *msg)
1887 {
1888 struct intel_iommu *iommu = irq_get_handler_data(irq);
1889 int reg = dmar_msi_reg(iommu, irq);
1890 unsigned long flag;
1891
1892 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1893 writel(msg->data, iommu->reg + reg + 4);
1894 writel(msg->address_lo, iommu->reg + reg + 8);
1895 writel(msg->address_hi, iommu->reg + reg + 12);
1896 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1897 }
1898
1899 void dmar_msi_read(int irq, struct msi_msg *msg)
1900 {
1901 struct intel_iommu *iommu = irq_get_handler_data(irq);
1902 int reg = dmar_msi_reg(iommu, irq);
1903 unsigned long flag;
1904
1905 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1906 msg->data = readl(iommu->reg + reg + 4);
1907 msg->address_lo = readl(iommu->reg + reg + 8);
1908 msg->address_hi = readl(iommu->reg + reg + 12);
1909 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1910 }
1911
1912 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1913 u8 fault_reason, u32 pasid, u16 source_id,
1914 unsigned long long addr)
1915 {
1916 const char *reason;
1917 int fault_type;
1918
1919 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1920
1921 if (fault_type == INTR_REMAP) {
1922 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1923 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1924 PCI_FUNC(source_id & 0xFF), addr >> 48,
1925 fault_reason, reason);
1926
1927 return 0;
1928 }
1929
1930 if (pasid == INVALID_IOASID)
1931 pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1932 type ? "DMA Read" : "DMA Write",
1933 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1934 PCI_FUNC(source_id & 0xFF), addr,
1935 fault_reason, reason);
1936 else
1937 pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1938 type ? "DMA Read" : "DMA Write", pasid,
1939 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1940 PCI_FUNC(source_id & 0xFF), addr,
1941 fault_reason, reason);
1942
1943 dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
1944
1945 return 0;
1946 }
1947
1948 #define PRIMARY_FAULT_REG_LEN (16)
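/*
 * Primary fault interrupt handler: walk the fault recording registers,
 * log each recorded fault (rate limited) and clear it, then clear the
 * overflow/pending bits in the fault status register.
 */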
1949 irqreturn_t dmar_fault(int irq, void *dev_id)
1950 {
1951 struct intel_iommu *iommu = dev_id;
1952 int reg, fault_index;
1953 u32 fault_status;
1954 unsigned long flag;
1955 static DEFINE_RATELIMIT_STATE(rs,
1956 DEFAULT_RATELIMIT_INTERVAL,
1957 DEFAULT_RATELIMIT_BURST);
1958
1959 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1960 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1961 if (fault_status && __ratelimit(&rs))
1962 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1963
1964
1965 if (!(fault_status & DMA_FSTS_PPF))
1966 goto unlock_exit;
1967
1968 fault_index = dma_fsts_fault_record_index(fault_status);
1969 reg = cap_fault_reg_offset(iommu->cap);
1970 while (1) {
/* When rate limited, skip the printing and simply clear the fault */
1972 bool ratelimited = !__ratelimit(&rs);
1973 u8 fault_reason;
1974 u16 source_id;
1975 u64 guest_addr;
1976 u32 pasid;
1977 int type;
1978 u32 data;
1979 bool pasid_present;
1980
1981
1982 data = readl(iommu->reg + reg +
1983 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1984 if (!(data & DMA_FRCD_F))
1985 break;
1986
1987 if (!ratelimited) {
1988 fault_reason = dma_frcd_fault_reason(data);
1989 type = dma_frcd_type(data);
1990
1991 pasid = dma_frcd_pasid_value(data);
1992 data = readl(iommu->reg + reg +
1993 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1994 source_id = dma_frcd_source_id(data);
1995
1996 pasid_present = dma_frcd_pasid_present(data);
1997 guest_addr = dmar_readq(iommu->reg + reg +
1998 fault_index * PRIMARY_FAULT_REG_LEN);
1999 guest_addr = dma_frcd_page_addr(guest_addr);
2000 }
2001
/* Clear the fault */
2003 writel(DMA_FRCD_F, iommu->reg + reg +
2004 fault_index * PRIMARY_FAULT_REG_LEN + 12);
2005
2006 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2007
2008 if (!ratelimited)
2009
2010 dmar_fault_do_one(iommu, type, fault_reason,
2011 pasid_present ? pasid : INVALID_IOASID,
2012 source_id, guest_addr);
2013
2014 fault_index++;
2015 if (fault_index >= cap_num_fault_regs(iommu->cap))
2016 fault_index = 0;
2017 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2018 }
2019
2020 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
2021 iommu->reg + DMAR_FSTS_REG);
2022
2023 unlock_exit:
2024 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2025 return IRQ_HANDLED;
2026 }
2027
2028 int dmar_set_interrupt(struct intel_iommu *iommu)
2029 {
2030 int irq, ret;
2031
/*
 * Check if the fault interrupt is already initialized.
 */
2035 if (iommu->irq)
2036 return 0;
2037
2038 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2039 if (irq > 0) {
2040 iommu->irq = irq;
2041 } else {
2042 pr_err("No free IRQ vectors\n");
2043 return -EINVAL;
2044 }
2045
2046 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2047 if (ret)
2048 pr_err("Can't request irq\n");
2049 return ret;
2050 }
2051
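/*
 * Set up and enable the fault-reporting interrupt on every DRHD unit and
 * clear any faults left over from before the handler was installed.
 */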
2052 int __init enable_drhd_fault_handling(void)
2053 {
2054 struct dmar_drhd_unit *drhd;
2055 struct intel_iommu *iommu;
2056
/*
 * Enable the fault control interrupt.
 */
2060 for_each_iommu(iommu, drhd) {
2061 u32 fault_status;
2062 int ret = dmar_set_interrupt(iommu);
2063
2064 if (ret) {
2065 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
2066 (unsigned long long)drhd->reg_base_addr, ret);
2067 return -1;
2068 }
2069
/*
 * Clear any previous faults.
 */
2073 dmar_fault(iommu->irq, iommu);
2074 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2075 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2076 }
2077
2078 return 0;
2079 }
2080
/*
 * Re-enable the Queued Invalidation interface.
 */
2084 int dmar_reenable_qi(struct intel_iommu *iommu)
2085 {
2086 if (!ecap_qis(iommu->ecap))
2087 return -ENOENT;
2088
2089 if (!iommu->qi)
2090 return -ENOENT;
2091
/*
 * First disable queued invalidation.
 */
2095 dmar_disable_qi(iommu);
2096
/*
 * Then enable queued invalidation again. Since there are no pending
 * invalidation requests now, it's safe to re-enable it.
 */
2101 __dmar_enable_qi(iommu);
2102
2103 return 0;
2104 }
2105
/*
 * Check interrupt remapping support in the DMAR table description.
 */
2109 int __init dmar_ir_support(void)
2110 {
2111 struct acpi_table_dmar *dmar;
2112 dmar = (struct acpi_table_dmar *)dmar_tbl;
2113 if (!dmar)
2114 return 0;
2115 return dmar->flags & 0x1;
2116 }
2117
2118
2119 static inline bool dmar_in_use(void)
2120 {
2121 return irq_remapping_enabled || intel_iommu_enabled;
2122 }
2123
2124 static int __init dmar_free_unused_resources(void)
2125 {
2126 struct dmar_drhd_unit *dmaru, *dmaru_n;
2127
2128 if (dmar_in_use())
2129 return 0;
2130
2131 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2132 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2133
2134 down_write(&dmar_global_lock);
2135 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2136 list_del(&dmaru->list);
2137 dmar_free_drhd(dmaru);
2138 }
2139 up_write(&dmar_global_lock);
2140
2141 return 0;
2142 }
2143
2144 late_initcall(dmar_free_unused_resources);
2145
/*
 * DMAR hotplug support: an ACPI _DSM with this GUID hands back DMAR
 * sub-tables (DRHD/ATSR/RHSA/SATC) for a hot-added or hot-removed
 * remapping hardware unit.
 */
2152 static guid_t dmar_hp_guid =
2153 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2154 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2155
/*
 * Currently there is only one revision and the BIOS does not check the
 * revision id, so use 0 for safety.
 */
2160 #define DMAR_DSM_REV_ID 0
2161 #define DMAR_DSM_FUNC_DRHD 1
2162 #define DMAR_DSM_FUNC_ATSR 2
2163 #define DMAR_DSM_FUNC_RHSA 3
2164 #define DMAR_DSM_FUNC_SATC 4
2165
2166 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2167 {
2168 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
2169 }
2170
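/*
 * Evaluate the DMAR hotplug _DSM function @func for @handle and walk the
 * returned buffer of DMAR structures with @handler.
 */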
2171 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2172 dmar_res_handler_t handler, void *arg)
2173 {
2174 int ret = -ENODEV;
2175 union acpi_object *obj;
2176 struct acpi_dmar_header *start;
2177 struct dmar_res_callback callback;
2178 static int res_type[] = {
2179 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2180 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2181 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2182 [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
2183 };
2184
2185 if (!dmar_detect_dsm(handle, func))
2186 return 0;
2187
2188 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2189 func, NULL, ACPI_TYPE_BUFFER);
2190 if (!obj)
2191 return -ENODEV;
2192
2193 memset(&callback, 0, sizeof(callback));
2194 callback.cb[res_type[func]] = handler;
2195 callback.arg[res_type[func]] = arg;
2196 start = (struct acpi_dmar_header *)obj->buffer.pointer;
2197 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2198
2199 ACPI_FREE(obj);
2200
2201 return ret;
2202 }
2203
2204 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2205 {
2206 int ret;
2207 struct dmar_drhd_unit *dmaru;
2208
2209 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2210 if (!dmaru)
2211 return -ENODEV;
2212
2213 ret = dmar_ir_hotplug(dmaru, true);
2214 if (ret == 0)
2215 ret = dmar_iommu_hotplug(dmaru, true);
2216
2217 return ret;
2218 }
2219
2220 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2221 {
2222 int i, ret;
2223 struct device *dev;
2224 struct dmar_drhd_unit *dmaru;
2225
2226 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2227 if (!dmaru)
2228 return 0;
2229
/*
 * All PCI devices managed by this unit should have been destroyed
 * before the unit itself can be hot-removed.
 */
2233 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2234 for_each_active_dev_scope(dmaru->devices,
2235 dmaru->devices_cnt, i, dev)
2236 return -EBUSY;
2237 }
2238
2239 ret = dmar_ir_hotplug(dmaru, false);
2240 if (ret == 0)
2241 ret = dmar_iommu_hotplug(dmaru, false);
2242
2243 return ret;
2244 }
2245
2246 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2247 {
2248 struct dmar_drhd_unit *dmaru;
2249
2250 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2251 if (dmaru) {
2252 list_del_rcu(&dmaru->list);
2253 synchronize_rcu();
2254 dmar_free_drhd(dmaru);
2255 }
2256
2257 return 0;
2258 }
2259
2260 static int dmar_hotplug_insert(acpi_handle handle)
2261 {
2262 int ret;
2263 int drhd_count = 0;
2264
2265 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2266 &dmar_validate_one_drhd, (void *)1);
2267 if (ret)
2268 goto out;
2269
2270 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2271 &dmar_parse_one_drhd, (void *)&drhd_count);
2272 if (ret == 0 && drhd_count == 0) {
2273 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2274 goto out;
2275 } else if (ret) {
2276 goto release_drhd;
2277 }
2278
2279 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2280 &dmar_parse_one_rhsa, NULL);
2281 if (ret)
2282 goto release_drhd;
2283
2284 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2285 &dmar_parse_one_atsr, NULL);
2286 if (ret)
2287 goto release_atsr;
2288
2289 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2290 &dmar_hp_add_drhd, NULL);
2291 if (!ret)
2292 return 0;
2293
2294 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2295 &dmar_hp_remove_drhd, NULL);
2296 release_atsr:
2297 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2298 &dmar_release_one_atsr, NULL);
2299 release_drhd:
2300 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2301 &dmar_hp_release_drhd, NULL);
2302 out:
2303 return ret;
2304 }
2305
2306 static int dmar_hotplug_remove(acpi_handle handle)
2307 {
2308 int ret;
2309
2310 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2311 &dmar_check_one_atsr, NULL);
2312 if (ret)
2313 return ret;
2314
2315 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2316 &dmar_hp_remove_drhd, NULL);
2317 if (ret == 0) {
2318 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2319 &dmar_release_one_atsr, NULL));
2320 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2321 &dmar_hp_release_drhd, NULL));
2322 } else {
2323 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2324 &dmar_hp_add_drhd, NULL);
2325 }
2326
2327 return ret;
2328 }
2329
2330 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2331 void *context, void **retval)
2332 {
2333 acpi_handle *phdl = retval;
2334
2335 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2336 *phdl = handle;
2337 return AE_CTRL_TERMINATE;
2338 }
2339
2340 return AE_OK;
2341 }
2342
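/*
 * Common entry point for dmar_device_add()/dmar_device_remove(): locate
 * an ACPI handle that implements the DMAR hotplug _DSM (either @handle
 * itself or a descendant) and perform the insert or remove sequence
 * under dmar_global_lock.
 */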
2343 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2344 {
2345 int ret;
2346 acpi_handle tmp = NULL;
2347 acpi_status status;
2348
2349 if (!dmar_in_use())
2350 return 0;
2351
2352 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2353 tmp = handle;
2354 } else {
2355 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2356 ACPI_UINT32_MAX,
2357 dmar_get_dsm_handle,
2358 NULL, NULL, &tmp);
2359 if (ACPI_FAILURE(status)) {
2360 pr_warn("Failed to locate _DSM method.\n");
2361 return -ENXIO;
2362 }
2363 }
2364 if (tmp == NULL)
2365 return 0;
2366
2367 down_write(&dmar_global_lock);
2368 if (insert)
2369 ret = dmar_hotplug_insert(tmp);
2370 else
2371 ret = dmar_hotplug_remove(tmp);
2372 up_write(&dmar_global_lock);
2373
2374 return ret;
2375 }
2376
2377 int dmar_device_add(acpi_handle handle)
2378 {
2379 return dmar_device_hotplug(handle, true);
2380 }
2381
2382 int dmar_device_remove(acpi_handle handle)
2383 {
2384 return dmar_device_hotplug(handle, false);
2385 }
2386
/**
 * dmar_platform_optin - whether the platform opted in to IOMMU DMA protection
 *
 * Returns true if DMAR_PLATFORM_OPT_IN is set in the DMAR table, i.e. the
 * platform vendor asks the OS to use the IOMMU for DMA protection.
 */
2394 bool dmar_platform_optin(void)
2395 {
2396 struct acpi_table_dmar *dmar;
2397 acpi_status status;
2398 bool ret;
2399
2400 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2401 (struct acpi_table_header **)&dmar);
2402 if (ACPI_FAILURE(status))
2403 return false;
2404
2405 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2406 acpi_put_table((struct acpi_table_header *)dmar);
2407
2408 return ret;
2409 }
2410 EXPORT_SYMBOL_GPL(dmar_platform_optin);