// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt

#include <linux/ctype.h>
#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	rcu_assign_pointer(pdev->p2pdma, p2p);
	return 0;

out_pool_destroy:
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}
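
/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */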
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	struct pci_p2pdma *p2pdma;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			range_len(&pgmap->range), dev_to_node(&pdev->dev),
			&pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
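
/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */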
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}
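
/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */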
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU whose family ID is Zen or newer supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel SkyLake-E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{PCI_VENDOR_ID_INTEL,	0x09a2, 0},
	{}
};
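
/*
 * If the first device on host's root bus is either devfn 00.0 or a PCIe
 * Root Port, return it.  Otherwise return NULL.
 *
 * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[]
 * (though PCIe spec doesn't require it).  On some platforms, e.g., Intel
 * Skylake, there is no such device, so a Root Port as the first device
 * on the bus is accepted as a stand-in when matching against the
 * whitelist.
 */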
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;

	if (root->devfn == PCI_DEVFN(0, 0))
		return root;

	if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT)
		return root;

	return NULL;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge, bool warn)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	if (warn)
		pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
			 vendor, device);

	return false;
}
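
/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */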
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
				  bool warn)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true, warn);

	if (__host_bridge_whitelist(host_a, false, warn) &&
	    __host_bridge_whitelist(host_b, false, warn))
		return true;

	return false;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
	       (client->bus->number << 8) | client->devfn;
}
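
/*
 * Calculate the P2PDMA mapping type and distance between two devices.
 *
 * If the two devices are the same PCI function, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
 *
 * If they are two functions of the same device, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
 * then one hop back down to another function of the same device).
 *
 * In the case where two devices are connected to the same PCIe switch,
 * return a distance of 4. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port 0
 *       + \- Device A
 *       \-+ Switch Downstream Port 1
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the switch
 * upstream port, back down to the other downstream port and on to Device
 * B. If ACS redirection is enabled on any bridge in the path, the TLPs
 * will instead be redirected through the host bridge.
 *
 * Any two devices whose data path goes through the host bridge consult
 * the whitelist: if the host bridge is whitelisted or the CPU is known to
 * support P2PDMA through the root complex, return
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the number of
 * ports per above; otherwise return PCI_P2PDMA_MAP_NOT_SUPPORTED.
 */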
static enum pci_p2pdma_map_type
calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
		       int *dist, bool verbose)
{
	enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	struct pci_dev *a = provider, *b = client, *bb;
	bool acs_redirects = false;
	struct pci_p2pdma *p2pdma;
	struct seq_buf acs_list;
	int acs_cnt = 0;
	int dist_a = 0;
	int dist_b = 0;
	char buf[128];

	seq_buf_init(&acs_list, buf, sizeof(buf));
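
	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */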
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(&acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;
	goto map_through_host_bridge;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(&acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (!acs_cnt) {
		map_type = PCI_P2PDMA_MAP_BUS_ADDR;
		goto done;
	}

	if (verbose) {
		acs_list.buffer[acs_list.len - 1] = 0; /* drop final semicolon */
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}
	acs_redirects = true;

map_through_host_bridge:
	if (!cpu_supports_p2pdma() &&
	    !host_bridge_whitelist(provider, client, acs_redirects)) {
		if (verbose)
			pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
				 pci_name(provider));
		map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}
done:
	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);
	if (p2pdma)
		xa_store(&p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);
	rcu_read_unlock();
	return map_type;
}
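
/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * positive number where a lower number is the preferable choice. (If there's
 * one client that's the same as the provider it will return 0, which is the
 * best choice).
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the
 * devices are listed in the 'pci_p2pdma_whitelist'.
 */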
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	enum pci_p2pdma_map_type map;
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int i, distance;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		map = calc_map_type_and_dist(provider, pci_client, &distance,
					     verbose);

		pci_dev_put(pci_client);

		if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
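
/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */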
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool res;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	res = p2pdma && p2pdma->p2pmem_published;
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);
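
/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found.
 */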
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
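
/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */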
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma;
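
	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */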
	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (unlikely(!p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
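
/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */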
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);
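
/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */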
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	struct pci_p2pdma *p2pdma;

	if (!addr)
		return 0;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return 0;
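
	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */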
	return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
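
/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */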
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);
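
/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device to allocate memory from
 * @sgl: the allocated scatterlist
 */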
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
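
/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-2-peer DMA operations. Non-published memory is only usable by
 * the device driver that published it.
 */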
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	struct pci_p2pdma *p2pdma;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		p2pdma->p2pmem_published = publish;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
						    struct device *dev)
{
	enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
	struct pci_dev *client;
	struct pci_p2pdma *p2pdma;
	int dist;

	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	if (!dev_is_pci(dev))
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	client = to_pci_dev(dev);

	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);

	if (p2pdma)
		type = xa_to_value(xa_load(&p2pdma->map_types,
					   map_types_idx(client)));
	rcu_read_unlock();

	if (type == PCI_P2PDMA_MAP_UNKNOWN)
		return calc_map_type_and_dist(provider, client, &dist, true);

	return type;
}
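
/**
 * pci_p2pdma_map_segment - map an sg segment determining the mapping type
 * @state: State structure that should be declared outside of the for_each_sg()
 *	loop and initialized to zero.
 * @dev: DMA device that's doing the mapping operation
 * @sg: scatterlist segment to map
 *
 * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
 * the sg segment is the same for the page_link and the dma_address.
 *
 * Attempt to map a single segment in an SG list for the PCI bus address type
 * of memory found in sg_page(). Returns the mapping type used and, if it is
 * PCI_P2PDMA_MAP_BUS_ADDR, sets the dma_address for the segment.
 */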
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	if (state->pgmap != sg_page(sg)->pgmap) {
		state->pgmap = sg_page(sg)->pgmap;
		state->map = pci_p2pdma_map_type(state->pgmap, dev);
		state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
	}

	if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
		sg->dma_address = sg_phys(sg) + state->bus_off;
		sg_dma_len(sg) = sg->length;
		sg_dma_mark_bus_address(sg);
	}

	return state->map;
}
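
/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *		to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *		(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format kstrtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device and specifying a PCI device
 * expects the caller to use the specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success if the value was either a boolean or a valid PCI
 * device name. Returns -ENODEV if the value is invalid or a device is not
 * found.
 */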
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
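		/*
		 * If the user enters a PCI device that doesn't exist
		 * (e.g. a mistyped address like "0000:01:00.1"), we
		 * don't want kstrtobool() to silently accept it as a
		 * boolean just because it begins with '0' or '1', so
		 * fall through and report "No such PCI device" instead.
		 */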
	} else if (!kstrtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
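
/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *		whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns 0 on success
 */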
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);