// SPDX-License-Identifier: GPL-2.0
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/dma-map-ops.h>
#include <asm/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"
#include "kernel.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

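/* Ordered from largest major to lowest */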
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

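/* Interrupts must be disabled.  */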
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev = dev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	return iommu->atu && mask > DMA_BIT_MASK(32);
}

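/* Interrupts must be disabled.  */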
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (!iommu_use_atu(pbm->iommu, mask)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

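/* Interrupts must be disabled.  */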
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

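/* Interrupts must be disabled.  */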
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	mask = dev->coherent_dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

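/* Bind every PCI function below bus_dev, recursing through bridges, to the
 * given IOTSB so the hypervisor routes their DMA through the ATU.
 */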
static unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				       unsigned long iotsb_num,
				       struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If bind fails for one device it leaves the rest of
			 * the devices on this IOTSB unusable, so abort and
			 * propagate the error to the caller.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}

static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (!iommu_use_atu(iommu, dvma)) {
		tbl = &iommu->tbl;
		iotsb_num = 0;
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_MAPPING_ERROR;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0;
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return -EINVAL;
	atu = iommu->atu;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

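	/* Init first segment length for backout at failure */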
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

	mask = *dev->dma_mask;
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;

		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}

		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return -EINVAL;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0;
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}

static int dma_4v_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;
	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4v_dma_ops = {
	.alloc			= dma_4v_alloc_coherent,
	.free			= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.dma_supported		= dma_4v_supported,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

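/* Walk the IOTSB and claim entries the firmware (OBP) still has live
 * translations for; stale entries whose target page is in the available
 * memory list are demapped instead of preserved.
 */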
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0,
							      i), 1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}

static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* register the IOTSB with the hypervisor */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

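	/* The "iommu-address-ranges" property holds four {base, size} pairs:
	 * two 32-bit ranges followed by two 64-bit ranges.  Use the last
	 * 64-bit range for ATU DMA to keep the IOTSB small.
	 */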
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create ATU IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}
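	/* Create the ATU iommu map; one bit represents one iotte in the
	 * IOTSB table.
	 */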
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}

static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

	dma_offset = vdma[0];

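	/* Setup initial software IOMMU state. */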
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

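	/* Allocate and initialize the free area map.  */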
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

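	/* The interpretation of this value depends on the entry type; for
	 * MSI32/MSI64 entries it carries the MSI data (the MSI number).
	 */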
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

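	/* Note: *head is a byte offset into this queue's entry array. */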
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	/* Go to next entry in ring.  */
	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

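	/* If ATU init fails it is not a complete failure; we can still
	 * continue using the legacy IOMMU.
	 */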
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
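			/* Don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */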
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);