#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
#include "ntb_hw_gen4.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb. A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx. Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw. Both "
		 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
		   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
		 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
		   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
		 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
		   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
		 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
		   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
		 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
		   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
		 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
		   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
		 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
		   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
		 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
		   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
		 "XEON B2B DSD split-BAR 5 32-bit address");

static int xeon_init_isr(struct intel_ntb_dev *ndev);

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

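	/* only the b2b topology has a workaround for the SDOORBELL lockup */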
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

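	/* no workaround for the SB01BASE lockup: doorbell and spad are unsafe */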
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx >= ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

void ndev_db_addr(struct intel_ntb_dev *ndev,
		  phys_addr_t *db_addr, resource_size_t *db_size,
		  phys_addr_t reg_addr, unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
	}
}

u64 ndev_db_read(struct intel_ntb_dev *ndev,
		 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	return ndev->reg->db_ioread(mmio);
}

int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
		  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
		pr_warn_once("%s: NTB unsafe doorbell access", __func__);

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
			*spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
		pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
		vec_mask |= ndev->db_link_mask;

	dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
		irq, nvec->num);

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}

int ndev_init_isr(struct intel_ntb_dev *ndev,
		  int msix_min, int msix_max,
		  int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	node = dev_to_node(&pdev->dev);

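	/* Mask all doorbell interrupts */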
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

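	/* Try to set up msix irq */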
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

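	/* Try to set up msi irq */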
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

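	/* Try to set up intx irq */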
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev->ntb.pdev;

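	/* Mask all doorbell interrupts */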
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
				     size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	struct pci_dev *pdev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

	ndev = filp->private_data;
	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	if (ndev->b2b_idx != UINT_MAX) {
		off += scnprintf(buf + off, buf_size - off,
				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
		off += scnprintf(buf + off, buf_size - off,
				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Window Size:\n");

	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "PBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "PBAR5SZ %hhu\n", u.v8);
	}

	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
	off += scnprintf(buf + off, buf_size - off,
			 "SBAR23SZ %hhu\n", u.v8);
	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR45SZ %hhu\n", u.v8);
	} else {
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR4SZ %hhu\n", u.v8);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
		off += scnprintf(buf + off, buf_size - off,
				 "SBAR5SZ %hhu\n", u.v8);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT4 -\t\t\t%#06x\n", u.v32);

		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "XLAT45 -\t\t%#018llx\n", u.v64);
	}

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	if (ndev->bar4_split) {
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT4 -\t\t\t%#06x\n", u.v32);
		u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT5 -\t\t\t%#06x\n", u.v32);
	} else {
		u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
		off += scnprintf(buf + off, buf_size - off,
				 "LMT45 -\t\t\t%#018llx\n", u.v64);
	}

	if (pdev_is_gen1(pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B XLAT45 -\t\t%#018llx\n",
						 u.v64);
			}

			u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT4 -\t\t%#06x\n",
						 u.v32);
				u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT5 -\t\t%#06x\n",
						 u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "B2B LMT45 -\t\t%#018llx\n",
						 u.v64);
			}

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			if (ndev->bar4_split) {
				u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR4 -\t\t\t%#06x\n", u.v32);
				u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR5 -\t\t\t%#06x\n", u.v32);
			} else {
				u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
				off += scnprintf(buf + off, buf_size - off,
						 "SBAR45 -\t\t%#018llx\n",
						 u.v64);
			}
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Statistics:\n");

		u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nXEON NTB Hardware Errors:\n");

		if (!pci_read_config_word(pdev,
					  XEON_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(pdev,
					  XEON_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(pdev,
					   XEON_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(pdev,
					   XEON_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev = filp->private_data;

	if (pdev_is_gen1(ndev->ntb.pdev))
		return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
	else if (pdev_is_gen3(ndev->ntb.pdev))
		return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
	else if (pdev_is_gen4(ndev->ntb.pdev) || pdev_is_gen5(ndev->ntb.pdev))
		return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);

	return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(pci_name(ndev->ntb.pdev),
					   debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}

int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
			   resource_size_t *addr_align,
			   resource_size_t *size_align,
			   resource_size_t *size_max)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	resource_size_t bar_size, mw_size;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (addr_align)
		*addr_align = pci_resource_len(ndev->ntb.pdev, bar);

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = mw_size;

	return 0;
}

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

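	/* hardware requires that addr is aligned to bar size */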
	if (addr & (bar_size - 1))
		return -EINVAL;

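	/* make sure the range fits in the usable mw size */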
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

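		/* Set the limit if supported, if size is not mw size */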
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

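		/* set and verify setting the translation address */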
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

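		/* set and verify setting the limit */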
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
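		/* split bar addr range must all be 32 bit */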
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

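		/* Set the limit if supported, if size is not mw size */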
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

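		/* set and verify setting the translation address */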
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

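		/* set and verify setting the limit */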
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}

u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
			 enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
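		/* link is down: no speed or width to report */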
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(&ntb->pdev->dev, "Disabling link\n");

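	/* Bring NTB link down */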
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
{
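	/* the peer memory window count matches the local count */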
	return ntb_ndev(ntb)->mw_count;
}

int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
			       phys_addr_t *base, resource_size_t *size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
				  resource_size_t *db_size, u64 *db_data, int db_bit)
{
	u64 db_bits;
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
		return -EINVAL;

	db_bits = BIT_ULL(db_bit);

	if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
		return -EINVAL;

	ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
		     ndev->peer_reg->db_bell);

	if (db_data)
		*db_data = db_bits;

	return 0;
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
			     phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, sidx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
			      u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, sidx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

static u64 xeon_db_ioread(const void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  XEON_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return 1;

	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & XEON_PPD_TOPO_MASK) {
	case XEON_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case XEON_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case XEON_PPD_TOPO_PRI_USD:
	case XEON_PPD_TOPO_PRI_DSD:
		return NTB_TOPO_PRI;

	case XEON_PPD_TOPO_SEC_USD:
	case XEON_PPD_TOPO_SEC_DSD:
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
		dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_COUNT,
			     XEON_DB_MSIX_VECTOR_SHIFT,
			     XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx == UINT_MAX) {
		dev_dbg(&pdev->dev, "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(&pdev->dev, "b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (XEON_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(&pdev->dev, "b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(&pdev->dev, "b2b bar size is too small\n");
			return -EIO;
		}
	}

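	/*
	 * Reset the secondary bar sizes to match the primary bar sizes.
	 * The bar sizes are encoded as a power of two, so halving the b2b
	 * bar means decrementing the size field, and disabling it means
	 * writing zero.
	 */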
	pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
	}

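	/* SBAR01 hit by first part of the b2b bar */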
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);

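	/*
	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
	 * bar, which is either disabled above or configured half-size; it
	 * starts at the PBAR xlat + offset.
	 */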
	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
	}

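	/* setup incoming bar limits == base addrs (zero length windows) */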
	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
	dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
		dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr);
	}

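	/* zero incoming translation addrs */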
	iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
	}

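	/* zero outgoing translation limits (whole bar allowed) */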
	iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
	}

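	/* set outgoing translation offsets */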
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
	dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
		dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
	}

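	/* set the translation offset for b2b registers */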
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

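	/* B2B_XLAT_OFFSET is 64 bit, but can only take 32 bit writes */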
	dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
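		/* map peer ntb mmio config space registers */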
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    XEON_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;

		ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
	}

	return 0;
}

static int xeon_init_ntb(struct intel_ntb_dev *ndev)
{
	struct device *dev = &ndev->ntb.pdev->dev;
	int rc;
	u32 ntb_ctl;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = XEON_MW_COUNT;

	ndev->spad_count = XEON_SPAD_COUNT;
	ndev->db_count = XEON_DB_COUNT;
	ndev->db_link_mask = XEON_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Primary config disabled\n");
			return -EINVAL;
		}

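		/* enable link to allow the secondary side device to appear */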
		ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
		ntb_ctl &= ~NTB_CTL_DISABLE;
		iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

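		/* use half the spads for the peer */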
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_sec_reg;
		ndev->xlat_reg = &xeon_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(dev, "NTB Secondary config disabled\n");
			return -EINVAL;
		}

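		/* use half the spads for the peer */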
		ndev->spad_count >>= 1;
		ndev->self_reg = &xeon_sec_reg;
		ndev->peer_reg = &xeon_pri_reg;
		ndev->xlat_reg = &xeon_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &xeon_pri_reg;
		ndev->peer_reg = &xeon_b2b_reg;
		ndev->xlat_reg = &xeon_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &xeon_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			if (ndev->b2b_idx >= ndev->mw_count) {
				dev_dbg(dev,
					"b2b_mw_idx %d invalid for mw_count %u\n",
					b2b_mw_idx, ndev->mw_count);
				return -EINVAL;
			}

			dev_dbg(dev, "setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(dev, "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = xeon_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

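		/* Enable Bus Master and Memory Space on the secondary side */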
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + XEON_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static int xeon_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc, mem;

	pdev = ndev->ntb.pdev;

	switch (pdev->device) {
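	/*
	 * Xeon hardware errata: writes to SDOORBELL or B2BDOORBELL in
	 * conjunction with inbound access to NTB MMIO space may hang the
	 * system.  The workaround is to use a memory window to access the
	 * interrupt and scratch pad registers on the remote system.
	 */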
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
		break;
	}

	switch (pdev->device) {
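	/*
	 * Xeon hardware errata: accessing any register in SB01BASE in the
	 * presence of bidirectional traffic crossing the NTB may hang.
	 */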
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
		break;
	}

	switch (pdev->device) {
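	/*
	 * Xeon hardware errata: writes to bit 14 of the b2b doorbell
	 * register are not mirrored to the remote system, so shrink the
	 * doorbell count by one (bit 14 is the last bit).
	 */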
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
		break;
	}

	ndev->reg = &xeon_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
		dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
			ppd, ndev->bar4_split);
	} else {
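		/*
		 * The transparent secondary side has no PPD to read, so
		 * infer split BAR from the number of memory bars found.
		 */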
		mem = pci_select_bars(pdev, IORESOURCE_MEM);
		ndev->bar4_split = hweight32(mem) ==
			HSX_SPLIT_BAR_MW_COUNT + 1;
		dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
			mem, ndev->bar4_split);
	}

	rc = xeon_init_ntb(ndev);
	if (rc)
		return rc;

	return xeon_init_isr(ndev);
}

static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
{
	xeon_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;
	ndev->peer_addr = pci_resource_start(pdev, 0);

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = UINT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc, node;

	node = dev_to_node(&pdev->dev);
	ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
	if (!ndev) {
		rc = -ENOMEM;
		goto err_ndev;
	}

	ndev_init_struct(ndev, pdev);

	if (pdev_is_gen1(pdev)) {
		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = xeon_init_dev(ndev);
		if (rc)
			goto err_init_dev;
	} else if (pdev_is_gen3(pdev)) {
		ndev->ntb.ops = &intel_ntb3_ops;
		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = gen3_init_dev(ndev);
		if (rc)
			goto err_init_dev;
	} else if (pdev_is_gen4(pdev) || pdev_is_gen5(pdev)) {
		ndev->ntb.ops = &intel_ntb4_ops;
		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = gen4_init_dev(ndev);
		if (rc)
			goto err_init_dev;
	} else {
		rc = -EINVAL;
		goto err_init_pci;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	dev_info(&pdev->dev, "NTB device registered.\n");

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
	    pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
		xeon_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) ||
	    pdev_is_gen4(pdev) || pdev_is_gen5(pdev))
		xeon_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}

static const struct intel_ntb_reg xeon_reg = {
	.poll_link = xeon_poll_link,
	.link_is_up = xeon_link_is_up,
	.db_ioread = xeon_db_ioread,
	.db_iowrite = xeon_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};

static const struct intel_ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg xeon_sec_reg = {
	.db_bell = XEON_SDOORBELL_OFFSET,
	.db_mask = XEON_SDBMSK_OFFSET,
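	/* second half of the scratchpads is used by the secondary side */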
	.spad = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
};

static const struct intel_ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
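	/*
	 * Note: no primary .bar0_base here.  Without the base address the
	 * secondary side cannot disable a window by setting the limit equal
	 * to base, nor limit the window to less than the full bar size.
	 */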
	.bar2_limit = XEON_PBAR23LMT_OFFSET,
	.bar2_xlat = XEON_PBAR23XLAT_OFFSET,
};

static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_limit = XEON_SBAR23LMT_OFFSET,
	.bar2_xlat = XEON_SBAR23XLAT_OFFSET,
};

struct intel_b2b_addr xeon_b2b_usd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

struct intel_b2b_addr xeon_b2b_dsd_addr = {
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

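/* ntb device operations for gen1 (xeon) hardware */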
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_align = intel_ntb_mw_get_align,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.peer_mw_count = intel_ntb_peer_mw_count,
	.peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_is_unsafe = intel_ntb_db_is_unsafe,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
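	/* GEN1 */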
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},

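	/* GEN3 */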
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},

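	/* GEN4 */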
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
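	/* GEN5 PCIe */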
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_GNR)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	pr_info("%s %s\n", NTB_DESC, NTB_VER);

	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);