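/*
 * IBM PowerPC Virtual I/O (VIO) bus support for the pSeries platform,
 * including Cooperative Memory Overcommitment (CMO) entitlement
 * management for virtual I/O devices.
 */
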
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
#include <linux/kexec.h>
#include <linux/of_irq.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR

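/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */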
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How long to delay a queued CMO balance operation (in jiffies) */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to devices in VIO_CMO_BALANCE_CHUNK sized chunks */
#define VIO_CMO_BALANCE_CHUNK 131072

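/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */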
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

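/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */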
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

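/*
 * Count the number of OF nodes under /vdevice that carry an
 * "ibm,my-dma-window" property, i.e. the devices that will require
 * CMO entitlement.
 */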
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

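/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the device and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */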
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}

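/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, and finally
 * the excess pool is used to increase the memory available to the
 * system as a whole.
 */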
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

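/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change can not be made
 */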
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unallocated entitlement each device can
	 * sacrifice to continue to meet the new low entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until the entitlement change is served.
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

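/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * After giving each device its minimum entitlement, any remaining
 * system entitlement is portioned out to the devices in
 * VIO_CMO_BALANCE_CHUNK sized increments until each device reaches its
 * desired entitlement level or the available entitlement is exhausted.
 * The reserve and excess pools are then recalculated.
 */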
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements to the minimum.
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;

		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				   dma_handle, dev->coherent_dma_mask, flag,
				   dev_to_node(dev));
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	dma_addr_t ret = DMA_MAPPING_ERROR;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;
	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return DMA_MAPPING_ERROR;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);

	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	ret = vio_cmo_alloc(viodev, alloc_size);
	if (ret)
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			       direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
	vio_cmo_dealloc(viodev, alloc_size);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc = vio_dma_iommu_alloc_coherent,
	.free = vio_dma_iommu_free_coherent,
	.map_sg = vio_dma_iommu_map_sg,
	.unmap_sg = vio_dma_iommu_unmap_sg,
	.map_page = vio_dma_iommu_map_page,
	.unmap_page = vio_dma_iommu_unmap_page,
	.dma_supported = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};

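/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 */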
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is in the device list, then ensure the changes will
	 * be balanced out by the system.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move any reserve memory in the change region to the excess
		 * pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

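/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: pointer to struct vio_dev for the device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when the device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for the device or
 *          device entry.
 */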
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

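/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: pointer to struct vio_dev for the device
 *
 * Remove the device from the CMO device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated is returned to
 * the system.
 */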
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired.
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in the OF tree as it is needed as
		 * long as the device is present.
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

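/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */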
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)					\
static ssize_t cmo_##name##_show(struct device *dev,			\
				 struct device_attribute *attr,		\
				 char *buf)				\
{									\
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);	\
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)					\
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name);			\
}									\
static struct bus_attribute bus_attr_cmo_bus_##name =			\
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)				\
static ssize_t								\
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)		\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name.var);			\
}									\
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void __init vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void __init vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

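/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: pointer to the vio_dev for the device
 * @op: pointer to the vio_pfo_op describing the operation
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  A busy response from the hypervisor is retried
 * until the operation completes, fails, or the optional timeout in
 * @op->timeout expires.  The raw hcall result is stored in
 * @op->hcall_err.
 *
 * Returns: 0 on success or a negative errno translated from the
 *          hcall return code.
 */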
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	kref_init(&tbl->it_kref);

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1, 0, 0);
}

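/**
 * vio_match_device: - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */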
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static void vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		viodrv->remove(viodev);

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
}

static void vio_bus_shutdown(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv;

	if (dev->driver) {
		viodrv = to_vio_driver(dev->driver);
		if (viodrv->shutdown)
			viodrv->shutdown(viodev);
		else if (kexec_in_progress)
			vio_bus_remove(dev);
	}
}

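/**
 * __vio_register_driver: - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 * @owner:	owning module
 * @mod_name:	module name string
 */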
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	/* vio_bus_type is only initialised for pseries */
	if (!machine_is(pseries))
		return -ENODEV;

	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

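/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */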
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node.  This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
					__func__,
					parent_node,
					of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
				__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
					__func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
					"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
					__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/*
		 * PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required when
		 * performing synchronous ops.
		 */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);

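/*
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name - OF node name for the root of the subtree to search.
 *		This must be either "vdevice" or "ibm,platform-facilities".
 */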
static void __init vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {
		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree. Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}

/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
				__func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
machine_postcore_initcall(pseries, vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
machine_device_initcall(pseries, vio_device_init);

static ssize_t name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
	.shutdown = vio_bus_shutdown,
};

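/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls of_get_property() to return the value of the
 * attribute specified by @which
 */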
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

/*
 * vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

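/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be dropped
 * after use.
 */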
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);

static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
machine_fs_initcall(pseries, vio_init);