// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 */
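
/*
 * Debugfs support for the Intel IOMMU (VT-d) driver. The files created
 * by intel_iommu_debugfs_init() live under the common IOMMU debugfs
 * directory (typically /sys/kernel/debug/iommu/intel) and dump hardware
 * registers, DMA and interrupt remapping tables, the invalidation queue
 * and latency statistics.
 */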
#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"
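
/*
 * Cursor for a root/context/PASID table walk. The walk helpers stash it
 * in m->private so that print_tbl_walk() can emit one line per device
 * without passing every field around explicitly.
 */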
struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};
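
/* Pairs a DMAR register offset with a printable name for register dumps. */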
struct iommu_regset {
	int offset;
	const char *regs;
};
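
/* Scratch buffer shared by latency_show_one() for snapshot output. */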
#define DEBUG_BUFFER_SIZE	1024
static char debug_buf[DEBUG_BUFFER_SIZE];

#define IOMMU_REGSET_ENTRY(_reg_)				\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }
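
/* For example, IOMMU_REGSET_ENTRY(VER) expands to { DMAR_VER_REG, "VER" }. */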

static const struct iommu_regset iommu_regs_32[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
};

static const struct iommu_regset iommu_regs_64[] = {
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
	IOMMU_REGSET_ENTRY(VCCAP),
	IOMMU_REGSET_ENTRY(VCMD),
	IOMMU_REGSET_ENTRY(VCRSP),
};
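
/*
 * Dump the raw contents of the 32-bit and 64-bit registers of every
 * active IOMMU, holding register_lock so each unit's snapshot is
 * internally consistent.
 */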
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the 64-bit hardware registers
		 * by adding the offset to the pointer (virtual address).
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0; i < ARRAY_SIZE(iommu_regs_32); i++) {
			value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_32[i].regs, iommu_regs_32[i].offset,
				   value);
		}
		for (i = 0; i < ARRAY_SIZE(iommu_regs_64); i++) {
			value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_64[i].regs, iommu_regs_64[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
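
/* Emit one line for the device currently described by m->private. */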
static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy mode DMAR doesn't support PASID, hence default it to -1
	 * indicating the invalid field.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}
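
/*
 * Walk one PASID table. The PASID value is reassembled from the
 * directory index (upper bits) and the table index (the low
 * PASID_PDE_SHIFT bits).
 */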
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}
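
/* Walk the PASID directory and descend into each present PASID table. */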
static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}
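
/*
 * Walk the 256 context entries of one bus and, for scalable mode
 * entries, descend into the PASID directory they reference.
 */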
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * Scalable mode root entry points to upper scalable mode
		 * context table and lower scalable mode context table. Each
		 * scalable mode context table has 128 context entries where as
		 * legacy mode context table has 256 context entries. So in
		 * scalable mode, the context entries for former 128 devices are
		 * in the lower scalable mode context table, while the latter
		 * 128 devices are in the upper scalable mode context table.
		 * In scalable mode, when devfn > 127, iommu_context_addr()
		 * automatically refers to upper scalable mode context table and
		 * hence the caller doesn't have to worry about differences
		 * between scalable mode and non scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}
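
/* Print the root table address, then walk every bus under iommu->lock. */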
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	u16 bus;

	spin_lock(&iommu->lock);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check if the root entry is present or not because
	 * iommu_context_addr() performs the same check before returning
	 * context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);
	spin_unlock(&iommu->lock);
}
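
/*
 * Show routine for the dmar_translation_struct file: walk the full
 * translation structures of every IOMMU with DMA remapping enabled.
 */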
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
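
/*
 * Size of the IOVA range covered by one entry at @level. With
 * VTD_PAGE_SHIFT == 12 and VTD_STRIDE_SHIFT == 9 this gives 4KiB at
 * level 1, 2MiB at level 2 and 1GiB at level 3.
 */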
static inline unsigned long level_to_directory_size(int level)
{
	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
		   iova >> VTD_PAGE_SHIFT, path[5], path[4],
		   path[3], path[2], path[1]);
}
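
/*
 * Recursively dump all present mappings below @pde. path[] records the
 * entry value seen at each level so dump_page_info() can print the
 * whole chain for every leaf or superpage entry.
 */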
static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
			       int level, unsigned long start,
			       u64 *path)
{
	int i;

	if (level > 5 || level < 1)
		return;

	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
	     i++, pde++, start += level_to_directory_size(level)) {
		if (!dma_pte_present(pde))
			continue;

		path[level] = pde->val;
		if (dma_pte_superpage(pde) || level == 1)
			dump_page_info(m, start, path);
		else
			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
					   level - 1, start, path);
		path[level] = 0;
	}
}
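
/*
 * Dump the page table of the domain attached to @dev. The number of
 * page-table levels is derived from the domain's AGAW (agaw + 2).
 */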
static int __show_device_domain_translation(struct device *dev, void *data)
{
	struct dmar_domain *domain;
	struct seq_file *m = data;
	u64 path[6] = { 0 };

	domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
	if (!domain)
		return 0;

	seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
		   (u64)virt_to_phys(domain->pgd));
	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");

	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
	seq_putc(m, '\n');

	/* Don't iterate */
	return 1;
}
static int show_device_domain_translation(struct device *dev, void *data)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (group) {
		/*
		 * The group->mutex is held across the callback, which will
		 * block calls to iommu_attach/detach_group/device. Hence,
		 * the domain of the device will not change during traversal.
		 *
		 * All devices in an iommu group share a single domain, hence
		 * we only dump the domain of the first device. Even though,
		 * this code still possibly races with the iommu_unmap()
		 * interface. This could be solved by RCU-freeing the page
		 * table pages in the iommu_unmap() path.
		 */
		iommu_group_for_each_dev(group, data,
					 __show_device_domain_translation);
		iommu_group_put(group);
	}

	return 0;
}

static int domain_translation_struct_show(struct seq_file *m, void *unused)
{
	return bus_for_each_dev(&pci_bus_type, NULL, m,
				show_device_domain_translation);
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
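
/*
 * Print every descriptor in the invalidation queue. Scalable mode uses
 * 256-bit descriptors (qw0-qw3), legacy mode 128-bit ones (qw0-qw1).
 */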
static void invalidation_queue_entry_show(struct seq_file *m,
					  struct intel_iommu *iommu)
{
	int index, shift = qi_shift(iommu);
	struct qi_desc *desc;
	int offset;

	if (ecap_smts(iommu->ecap))
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
	else
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");

	for (index = 0; index < QI_LENGTH; index++) {
		offset = index << shift;
		desc = iommu->qi->desc + offset;
		if (ecap_smts(iommu->ecap))
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   desc->qw2, desc->qw3,
				   iommu->qi->desc_status[index]);
		else
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   iommu->qi->desc_status[index]);
	}
}
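
/* Dump base, head, tail and all entries of each enabled invalidation queue. */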
static int invalidation_queue_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flags;
	struct q_inval *qi;
	int shift;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		qi = iommu->qi;
		shift = qi_shift(iommu);

		if (!qi || !ecap_qis(iommu->ecap))
			continue;

		seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
			   (u64)virt_to_phys(qi->desc),
			   dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
			   dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
		invalidation_queue_entry_show(m, iommu);
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(invalidation_queue);

#ifdef CONFIG_IRQ_REMAP
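/*
 * Print the remapped-format IRTEs, i.e. entries that are present and
 * not in posted format.
 */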
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}
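
/*
 * Print the posted-format IRTEs. The low bits of the posted interrupt
 * descriptor address are stored without their 6 least significant bits
 * (the descriptor is 64-byte aligned), hence the << 6 when printing.
 */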
static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For active IOMMUs go through the Interrupt remapping
 * table and print valid entries in the format of
 * "SourceID IRTE_high IRTE_low".
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif
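
/* Print one IOMMU's latency snapshot, if latency sampling is enabled. */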
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
			     struct dmar_drhd_unit *drhd)
{
	int ret;

	seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
		   iommu->name, drhd->reg_base_addr);

	ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
	if (ret < 0)
		seq_puts(m, "Failed to get latency snapshot");
	else
		seq_puts(m, debug_buf);
	seq_puts(m, "\n");
}
static int latency_show(struct seq_file *m, void *v)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		latency_show_one(m, iommu, drhd);
	rcu_read_unlock();

	return 0;
}

static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, latency_show, NULL);
}
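
/*
 * Select which latency events are sampled: writing "0" disables all
 * counters, "1" enables IOTLB invalidation, "2" device-TLB invalidation,
 * "3" IEC invalidation and "4" page request latency sampling, e.g.
 * (path assuming the default debugfs mount point):
 *
 *   echo 1 > /sys/kernel/debug/iommu/intel/dmar_perf_latency
 */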
static ssize_t dmar_perf_latency_write(struct file *filp,
				       const char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int counting;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (kstrtoint(buf, 0, &counting))
		return -EINVAL;

	switch (counting) {
	case 0:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd) {
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
			dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
		}
		rcu_read_unlock();
		break;
	case 1:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
		rcu_read_unlock();
		break;
	case 2:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
		rcu_read_unlock();
		break;
	case 3:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
		rcu_read_unlock();
		break;
	case 4:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
		rcu_read_unlock();
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;
	return cnt;
}

static const struct file_operations dmar_perf_latency_fops = {
	.open		= dmar_perf_latency_open,
	.write		= dmar_perf_latency_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
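
/*
 * Create the "intel" directory and the debugfs files above under the
 * common IOMMU debugfs root (iommu_debugfs_dir, typically
 * /sys/kernel/debug/iommu).
 */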
void __init intel_iommu_debugfs_init(void)
{
	struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
						iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
	debugfs_create_file("domain_translation_struct", 0444,
			    intel_iommu_debug, NULL,
			    &domain_translation_struct_fops);
	debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
			    NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
	debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
			    NULL, &dmar_perf_latency_fops);
}