0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/crc32.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/io.h>
0013 #include <linux/module.h>
0014 #include <linux/slab.h>
0015 #include <linux/pci_ids.h>
0016 #include <linux/random.h>
0017
0018 #include <linux/pci-epc.h>
0019 #include <linux/pci-epf.h>
0020 #include <linux/pci_regs.h>
0021
0022 #define IRQ_TYPE_LEGACY 0
0023 #define IRQ_TYPE_MSI 1
0024 #define IRQ_TYPE_MSIX 2
0025
0026 #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
0027 #define COMMAND_RAISE_MSI_IRQ BIT(1)
0028 #define COMMAND_RAISE_MSIX_IRQ BIT(2)
0029 #define COMMAND_READ BIT(3)
0030 #define COMMAND_WRITE BIT(4)
0031 #define COMMAND_COPY BIT(5)
0032
0033 #define STATUS_READ_SUCCESS BIT(0)
0034 #define STATUS_READ_FAIL BIT(1)
0035 #define STATUS_WRITE_SUCCESS BIT(2)
0036 #define STATUS_WRITE_FAIL BIT(3)
0037 #define STATUS_COPY_SUCCESS BIT(4)
0038 #define STATUS_COPY_FAIL BIT(5)
0039 #define STATUS_IRQ_RAISED BIT(6)
0040 #define STATUS_SRC_ADDR_INVALID BIT(7)
0041 #define STATUS_DST_ADDR_INVALID BIT(8)
0042
0043 #define FLAG_USE_DMA BIT(0)
0044
0045 #define TIMER_RESOLUTION 1
0046
0047 static struct workqueue_struct *kpcitest_workqueue;
0048
/* Per-function driver state for the PCI endpoint test function. */
struct pci_epf_test {
	void *reg[PCI_STD_NUM_BARS];	/* kernel va of each BAR's backing memory */
	struct pci_epf *epf;		/* back-pointer to the endpoint function */
	enum pci_barno test_reg_bar;	/* BAR holding struct pci_epf_test_reg */
	size_t msix_table_offset;	/* MSI-X table offset within test_reg_bar */
	struct delayed_work cmd_handler;	/* polls reg->command for host requests */
	struct dma_chan *dma_chan_tx;	/* MEM_TO_DEV channel (or shared memcpy chan) */
	struct dma_chan *dma_chan_rx;	/* DEV_TO_MEM channel (or shared memcpy chan) */
	struct completion transfer_complete;	/* signalled from the DMA callback */
	bool dma_supported;		/* set at bind if a DMA channel was acquired */
	bool dma_private;		/* channels are private slave chans, not memcpy */
	const struct pci_epc_features *epc_features;	/* cached EPC capabilities */
};
0062
/*
 * Register block exposed to the host through the test BAR.  The layout is
 * part of the host/endpoint protocol and must match the host-side test
 * driver, hence __packed.
 */
struct pci_epf_test_reg {
	u32 magic;
	u32 command;	/* COMMAND_* bits written by the host */
	u32 status;	/* STATUS_* bits reported back to the host */
	u64 src_addr;	/* host-side source bus address */
	u64 dst_addr;	/* host-side destination bus address */
	u32 size;	/* transfer length in bytes */
	u32 checksum;	/* CRC32 of the transferred payload */
	u32 irq_type;	/* IRQ_TYPE_* requested by the host */
	u32 irq_number;	/* MSI/MSI-X vector to raise */
	u32 flags;	/* FLAG_* modifiers, e.g. FLAG_USE_DMA */
} __packed;
0075
/*
 * Default configuration-space header for the test function; the vendor and
 * device IDs are left as PCI_ANY_ID so configfs users can override them.
 */
static struct pci_epf_header test_header = {
	.vendorid = PCI_ANY_ID,
	.deviceid = PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};
0082
0083 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
0084
/*
 * DMA-engine completion callback: wake the waiter in
 * pci_epf_test_data_transfer().
 */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
0111 dma_addr_t dma_dst, dma_addr_t dma_src,
0112 size_t len, dma_addr_t dma_remote,
0113 enum dma_transfer_direction dir)
0114 {
0115 struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ?
0116 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
0117 dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
0118 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
0119 struct pci_epf *epf = epf_test->epf;
0120 struct dma_async_tx_descriptor *tx;
0121 struct dma_slave_config sconf = {};
0122 struct device *dev = &epf->dev;
0123 dma_cookie_t cookie;
0124 int ret;
0125
0126 if (IS_ERR_OR_NULL(chan)) {
0127 dev_err(dev, "Invalid DMA memcpy channel\n");
0128 return -EINVAL;
0129 }
0130
0131 if (epf_test->dma_private) {
0132 sconf.direction = dir;
0133 if (dir == DMA_MEM_TO_DEV)
0134 sconf.dst_addr = dma_remote;
0135 else
0136 sconf.src_addr = dma_remote;
0137
0138 if (dmaengine_slave_config(chan, &sconf)) {
0139 dev_err(dev, "DMA slave config fail\n");
0140 return -EIO;
0141 }
0142 tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
0143 flags);
0144 } else {
0145 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
0146 flags);
0147 }
0148
0149 if (!tx) {
0150 dev_err(dev, "Failed to prepare DMA memcpy\n");
0151 return -EIO;
0152 }
0153
0154 tx->callback = pci_epf_test_dma_callback;
0155 tx->callback_param = epf_test;
0156 cookie = tx->tx_submit(tx);
0157 reinit_completion(&epf_test->transfer_complete);
0158
0159 ret = dma_submit_error(cookie);
0160 if (ret) {
0161 dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
0162 return -EIO;
0163 }
0164
0165 dma_async_issue_pending(chan);
0166 ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
0167 if (ret < 0) {
0168 dmaengine_terminate_sync(chan);
0169 dev_err(dev, "DMA wait_for_completion_timeout\n");
0170 return -ETIMEDOUT;
0171 }
0172
0173 return 0;
0174 }
0175
/* Match criteria handed to dma_request_channel() via epf_dma_filter_fn(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device the channel must belong to */
	u32 dma_mask;		/* required direction bit, BIT(DMA_*_TO_*) */
};
0180
0181 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
0182 {
0183 struct epf_dma_filter *filter = node;
0184 struct dma_slave_caps caps;
0185
0186 memset(&caps, 0, sizeof(caps));
0187 dma_get_slave_caps(chan, &caps);
0188
0189 return chan->device->dev == filter->dev
0190 && (filter->dma_mask & caps.directions);
0191 }
0192
0193
0194
0195
0196
0197
0198
0199 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
0200 {
0201 struct pci_epf *epf = epf_test->epf;
0202 struct device *dev = &epf->dev;
0203 struct epf_dma_filter filter;
0204 struct dma_chan *dma_chan;
0205 dma_cap_mask_t mask;
0206 int ret;
0207
0208 filter.dev = epf->epc->dev.parent;
0209 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
0210
0211 dma_cap_zero(mask);
0212 dma_cap_set(DMA_SLAVE, mask);
0213 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
0214 if (!dma_chan) {
0215 dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
0216 goto fail_back_tx;
0217 }
0218
0219 epf_test->dma_chan_rx = dma_chan;
0220
0221 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
0222 dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
0223
0224 if (!dma_chan) {
0225 dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
0226 goto fail_back_rx;
0227 }
0228
0229 epf_test->dma_chan_tx = dma_chan;
0230 epf_test->dma_private = true;
0231
0232 init_completion(&epf_test->transfer_complete);
0233
0234 return 0;
0235
0236 fail_back_rx:
0237 dma_release_channel(epf_test->dma_chan_rx);
0238 epf_test->dma_chan_tx = NULL;
0239
0240 fail_back_tx:
0241 dma_cap_zero(mask);
0242 dma_cap_set(DMA_MEMCPY, mask);
0243
0244 dma_chan = dma_request_chan_by_mask(&mask);
0245 if (IS_ERR(dma_chan)) {
0246 ret = PTR_ERR(dma_chan);
0247 if (ret != -EPROBE_DEFER)
0248 dev_err(dev, "Failed to get DMA channel\n");
0249 return ret;
0250 }
0251 init_completion(&epf_test->transfer_complete);
0252
0253 epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
0254
0255 return 0;
0256 }
0257
0258
0259
0260
0261
0262
0263
0264 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
0265 {
0266 if (!epf_test->dma_supported)
0267 return;
0268
0269 dma_release_channel(epf_test->dma_chan_tx);
0270 if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
0271 epf_test->dma_chan_tx = NULL;
0272 epf_test->dma_chan_rx = NULL;
0273 return;
0274 }
0275
0276 dma_release_channel(epf_test->dma_chan_rx);
0277 epf_test->dma_chan_rx = NULL;
0278
0279 return;
0280 }
0281
/*
 * pci_epf_test_print_rate() - Log the size, elapsed time and throughput of
 * a completed transfer.
 * @ops: operation name ("READ"/"WRITE"/"COPY") for the log line
 * @size: number of bytes transferred
 * @start, @end: timestamps taken around the transfer
 * @dma: whether the transfer used DMA (log annotation only)
 */
static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts;
	u64 rate, ns;

	ts = timespec64_sub(*end, *start);

	/* rate (bytes/s) = size * NSEC_PER_SEC / ns */
	ns = timespec64_to_ns(&ts);
	rate = size * NSEC_PER_SEC;

	/*
	 * do_div() takes a 32-bit divisor; halve both operands until ns
	 * fits, which preserves the quotient.
	 */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	/* avoid division by zero for immeasurably fast transfers */
	if (!ns)
		return;

	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}
0311
/*
 * pci_epf_test_copy() - Handle COMMAND_COPY: copy reg->size bytes from the
 * host address reg->src_addr to the host address reg->dst_addr.
 *
 * Both host windows are mapped into EPC outbound address space.  With
 * FLAG_USE_DMA a MEM_TO_MEM memcpy-channel transfer is used (private slave
 * channels cannot do mem-to-mem and are rejected); otherwise the data is
 * bounced through a temporary kernel buffer with memcpy_fromio/_toio.
 *
 * Returns 0 on success, a negative errno otherwise; on the address-setup
 * failures reg->status is updated for the host.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* outbound window for the host-side source */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* outbound window for the host-side destination */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* private slave channels cannot do mem-to-mem copies */
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* CPU copy: bounce through a kernel buffer */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

	/* unwind in reverse order of acquisition */
err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
0410
/*
 * pci_epf_test_read() - Handle COMMAND_READ: read reg->size bytes from the
 * host address reg->src_addr into a local buffer and verify the CRC32
 * against reg->checksum (written by the host beforehand).
 *
 * With FLAG_USE_DMA the buffer is DMA-mapped and filled via a DEV_TO_MEM
 * transfer; otherwise memcpy_fromio() through the mapped window is used.
 *
 * Returns 0 on success (checksum match), a negative errno otherwise.
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mapping must be done against the EPC's parent device */
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* outbound window onto the host-side source buffer */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* verify payload integrity against the host-provided checksum */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
0500
/*
 * pci_epf_test_write() - Handle COMMAND_WRITE: fill a local buffer with
 * random bytes, publish its CRC32 in reg->checksum for the host to verify,
 * then write reg->size bytes to the host address reg->dst_addr.
 *
 * With FLAG_USE_DMA the buffer is DMA-mapped and pushed via a MEM_TO_DEV
 * transfer; otherwise memcpy_toio() through the mapped window is used.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mapping must be done against the EPC's parent device */
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* outbound window onto the host-side destination buffer */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* random payload; host checks it against the published checksum */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * Give the posted writes time to reach host memory before the
	 * completion IRQ is raised, so the host does not read stale data.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
0596
0597 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
0598 u16 irq)
0599 {
0600 struct pci_epf *epf = epf_test->epf;
0601 struct device *dev = &epf->dev;
0602 struct pci_epc *epc = epf->epc;
0603 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
0604 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
0605
0606 reg->status |= STATUS_IRQ_RAISED;
0607
0608 switch (irq_type) {
0609 case IRQ_TYPE_LEGACY:
0610 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
0611 PCI_EPC_IRQ_LEGACY, 0);
0612 break;
0613 case IRQ_TYPE_MSI:
0614 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
0615 PCI_EPC_IRQ_MSI, irq);
0616 break;
0617 case IRQ_TYPE_MSIX:
0618 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
0619 PCI_EPC_IRQ_MSIX, irq);
0620 break;
0621 default:
0622 dev_err(dev, "Failed to raise IRQ, unknown type\n");
0623 break;
0624 }
0625 }
0626
/*
 * pci_epf_test_cmd_handler() - Poll the shared register block for a command
 * written by the host, execute it, report the result in reg->status, raise
 * the requested completion interrupt, and re-queue itself.
 *
 * Only one command is handled per invocation; the COMMAND_* bits are
 * checked in a fixed priority order.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* consume the command and clear stale status before executing */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/* validate the requested vector against what is configured */
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		/* validate the requested vector against what is configured */
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* re-arm: the handler polls roughly every millisecond */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
0716
0717 static void pci_epf_test_unbind(struct pci_epf *epf)
0718 {
0719 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
0720 struct pci_epc *epc = epf->epc;
0721 struct pci_epf_bar *epf_bar;
0722 int bar;
0723
0724 cancel_delayed_work(&epf_test->cmd_handler);
0725 pci_epf_test_clean_dma_chan(epf_test);
0726 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
0727 epf_bar = &epf->bar[bar];
0728
0729 if (epf_test->reg[bar]) {
0730 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
0731 epf_bar);
0732 pci_epf_free_space(epf, epf_test->reg[bar], bar,
0733 PRIMARY_INTERFACE);
0734 }
0735 }
0736 }
0737
0738 static int pci_epf_test_set_bar(struct pci_epf *epf)
0739 {
0740 int bar, add;
0741 int ret;
0742 struct pci_epf_bar *epf_bar;
0743 struct pci_epc *epc = epf->epc;
0744 struct device *dev = &epf->dev;
0745 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
0746 enum pci_barno test_reg_bar = epf_test->test_reg_bar;
0747 const struct pci_epc_features *epc_features;
0748
0749 epc_features = epf_test->epc_features;
0750
0751 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
0752 epf_bar = &epf->bar[bar];
0753
0754
0755
0756
0757
0758 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
0759
0760 if (!!(epc_features->reserved_bar & (1 << bar)))
0761 continue;
0762
0763 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
0764 epf_bar);
0765 if (ret) {
0766 pci_epf_free_space(epf, epf_test->reg[bar], bar,
0767 PRIMARY_INTERFACE);
0768 dev_err(dev, "Failed to set BAR%d\n", bar);
0769 if (bar == test_reg_bar)
0770 return ret;
0771 }
0772 }
0773
0774 return 0;
0775 }
0776
/*
 * pci_epf_test_core_init() - Program the endpoint controller for this
 * function: configuration-space header, BARs, and MSI/MSI-X capabilities.
 * Called at bind time, or from the CORE_INIT notifier when the controller
 * initializes late.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	/* epc_features may legitimately be absent for this controller */
	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		/* MSI-X table lives inside the test register BAR */
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}
0828
0829 static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
0830 void *data)
0831 {
0832 struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
0833 struct pci_epf_test *epf_test = epf_get_drvdata(epf);
0834 int ret;
0835
0836 switch (val) {
0837 case CORE_INIT:
0838 ret = pci_epf_test_core_init(epf);
0839 if (ret)
0840 return NOTIFY_BAD;
0841 break;
0842
0843 case LINK_UP:
0844 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
0845 msecs_to_jiffies(1));
0846 break;
0847
0848 default:
0849 dev_err(&epf->dev, "Invalid EPF test notifier event\n");
0850 return NOTIFY_BAD;
0851 }
0852
0853 return NOTIFY_OK;
0854 }
0855
/*
 * pci_epf_test_alloc_space() - Allocate backing memory for every usable
 * BAR.  The test register BAR additionally hosts the MSI-X table and PBA
 * (when the controller is MSI-X capable), so its size accounts for those.
 *
 * Allocation failure of the test register BAR is fatal; failure of any
 * other BAR is only logged.
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	/* register block rounded up so the MSI-X table starts aligned */
	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;

		/* pending-bit array: one bit per vector, 8-byte aligned */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		/*
		 * bar_size[] was updated with the fixed size in
		 * pci_epf_configure_bar(); everything must fit in it.
		 */
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocated register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/* a 64-bit BAR consumes the following BAR slot as well */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		/* may be NULL on failure; set_bar/unbind check for that */
		epf_test->reg[bar] = base;
	}

	return 0;
}
0919
0920 static void pci_epf_configure_bar(struct pci_epf *epf,
0921 const struct pci_epc_features *epc_features)
0922 {
0923 struct pci_epf_bar *epf_bar;
0924 bool bar_fixed_64bit;
0925 int i;
0926
0927 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
0928 epf_bar = &epf->bar[i];
0929 bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
0930 if (bar_fixed_64bit)
0931 epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
0932 if (epc_features->bar_fixed_size[i])
0933 bar_size[i] = epc_features->bar_fixed_size[i];
0934 }
0935 }
0936
/*
 * pci_epf_test_bind() - Bind the test function to its endpoint controller:
 * query EPC features, pick the test register BAR, allocate BAR memory,
 * initialize the controller (immediately or via the CORE_INIT notifier),
 * set up DMA, and start the command handler (immediately or on LINK_UP).
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	/* first BAR not reserved by the controller hosts the registers */
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	/* no CORE_INIT event will come: initialize the controller now */
	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	/* DMA is optional: fall back to CPU copies when unavailable */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (linkup_notifier) {
		/* defer command polling until the link actually comes up */
		epf->nb.notifier_call = pci_epf_test_notifier;
		pci_epc_register_notifier(epc, &epf->nb);
	} else {
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
	}

	return 0;
}
0991
/* Device-ID table: matches EPF devices created with this configfs name. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
0998
0999 static int pci_epf_test_probe(struct pci_epf *epf)
1000 {
1001 struct pci_epf_test *epf_test;
1002 struct device *dev = &epf->dev;
1003
1004 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1005 if (!epf_test)
1006 return -ENOMEM;
1007
1008 epf->header = &test_header;
1009 epf_test->epf = epf;
1010
1011 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1012
1013 epf_set_drvdata(epf, epf_test);
1014 return 0;
1015 }
1016
/* EPF framework callbacks for this function driver. */
static struct pci_epf_ops ops = {
	.unbind = pci_epf_test_unbind,
	.bind = pci_epf_test_bind,
};
1021
/* Endpoint-function driver registration record. */
static struct pci_epf_driver test_driver = {
	.driver.name = "pci_epf_test",
	.probe = pci_epf_test_probe,
	.id_table = pci_epf_test_ids,
	.ops = &ops,
	.owner = THIS_MODULE,
};
1029
1030 static int __init pci_epf_test_init(void)
1031 {
1032 int ret;
1033
1034 kpcitest_workqueue = alloc_workqueue("kpcitest",
1035 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1036 if (!kpcitest_workqueue) {
1037 pr_err("Failed to allocate the kpcitest work queue\n");
1038 return -ENOMEM;
1039 }
1040
1041 ret = pci_epf_register_driver(&test_driver);
1042 if (ret) {
1043 destroy_workqueue(kpcitest_workqueue);
1044 pr_err("Failed to register pci epf test driver --> %d\n", ret);
1045 return ret;
1046 }
1047
1048 return 0;
1049 }
1050 module_init(pci_epf_test_init);
1051
1052 static void __exit pci_epf_test_exit(void)
1053 {
1054 if (kpcitest_workqueue)
1055 destroy_workqueue(kpcitest_workqueue);
1056 pci_epf_unregister_driver(&test_driver);
1057 }
1058 module_exit(pci_epf_test_exit);
1059
1060 MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1061 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1062 MODULE_LICENSE("GPL v2");