/*
** ccio-dma.c:
**	DMA management routines for first-generation cache-coherent
**	PA-RISC machines: program the U2/Uturn IOA in "Virtual Mode"
**	and use the I/O MMU to translate bus addresses.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>		/* for L1_CACHE_BYTES */
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>	/* for register_module() */
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support; collecting stats costs CPU cycles */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>		/* for proc_runway_root */

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr)	__raw_writel(value, addr)
#define READ_U32(addr)		__raw_readl(addr)

/* hardware IDs (hversion) of the supported U2/Uturn I/O adapters */
#define U2_IOA_RUNWAY		0x580
#define U2_BC_GSC		0x501
#define UTURN_IOA_RUNWAY	0x581
#define UTURN_BC_GSC		0x502

#define IOA_NORMAL_MODE		0x00020080	/* IO_CONTROL value to turn on CCIO */
#define CMD_TLB_DIRECT_WRITE	35		/* IO_COMMAND for I/O TLB writes */
#define CMD_TLB_PURGE		33		/* IO_COMMAND to purge an I/O TLB entry */

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t unused1[12];
	uint32_t io_command;		/* Offset 12 */
	uint32_t io_status;		/* Offset 13 */
	uint32_t io_control;		/* Offset 14 */
	int32_t unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t io_err_resp;		/* Offset  0 */
	uint32_t io_err_info;		/* Offset  1 */
	uint32_t io_err_req;		/* Offset  2 */
	uint32_t io_err_resp_hi;	/* Offset  3 */
	uint32_t io_tlb_entry_m;	/* Offset  4 */
	uint32_t io_tlb_entry_l;	/* Offset  5 */
	uint32_t unused3[1];
	uint32_t io_pdir_base;		/* Offset  7 */
	uint32_t io_io_low_hv;		/* Offset  8 */
	uint32_t io_io_high_hv;		/* Offset  9 */
	uint32_t unused4[1];
	uint32_t io_chain_id_mask;	/* Offset 11 */
	uint32_t unused5[2];
	uint32_t io_io_low;		/* Offset 14 */
	uint32_t io_io_high;		/* Offset 15 */
};
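
/*
** The U2/Uturn IO PDIR is an array of 64-bit entries, one per IOVP.
** As built by ccio_io_pdir_entry() below, the low-order 32-bit word
** holds the low physical address bits with the DMA hint set (including
** IOPDIR_VALID) deposited into its low-order bits, and the high-order
** word holds the upper physical address bits plus the CPU coherence
** index. On this big-endian machine, byte 7 of an entry is therefore
** the hint/valid byte, which is why ccio_mark_invalid() can invalidate
** an entry by clearing that single byte.
*/
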
struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* I/O MMU base address */
	u8 *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address of IO PDIR */
	u32 pdir_size;			/* bytes, function of IOV Space size */
	u32 res_hint;			/* next available IOVP - circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;	/* set if a Cujo 2.0 sits below this ioc */

	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* the "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;
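
/*
** I/O pdir resource management: each bit set in res_map marks one pdir
** entry (one IOVP) as allocated. The search macros below scan the map
** one byte/halfword/word/doubleword at a time, so a single mapping can
** claim at most 64 pages (32 on 32-bit kernels); see ccio_alloc_range().
*/
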
#define IOVP_SIZE	PAGE_SIZE
#define IOVP_SHIFT	PAGE_SHIFT
#define IOVP_MASK	PAGE_MASK

/* convert between an IOVP plus page offset and a full IOVA */
#define CCIO_IOVA(iovp,offset)	((iovp) | (offset))
#define CCIO_IOVP(iova)		((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp)	((iovp) >> IOVP_SHIFT)
#define MKIOVP(pdir_idx)	((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset)	(dma_addr_t)((long)(iovp) | (long)(offset))

#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) { \
		int ret; \
		unsigned int idx; \
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size); \
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx; \
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioa)->res_map[(ioa)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioa)->res_map[(ioa)->res_size]; \
	CCIO_SEARCH_LOOP(ioa, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioa)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
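
/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device doing the DMA.
 * @size: The size of the DMA region (already a multiple of IOVP_SIZE).
 *
 * Search the resource map for a free run of pdir entries, starting at
 * the circular hint and wrapping around to the start of the map once.
 * Panics if the map is exhausted. Returns the pdir index of the first
 * page of the range (res_idx << 3).
 */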
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %zu pages_needed %d\n",
		__func__, size, pages_needed);

	/* the segment boundary this device's mappings must not cross */
	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

	if (pages_needed <= 8) {
#if 0
		/* FIXME: the bit search should shift its way through
		 * an unsigned long rather than a byte at a time. As it
		 * is now, the enabled path below hands a whole byte
		 * (8 pages) to any mapping of 8 pages or fewer.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	/* the CCIO_SEARCH_LOOP()s above jump to resource_found on success */
	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif
	/* return the bit address */
	return res_idx << 3;
}

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);
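
/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The number of pages to free.
 */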
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if (pages_mapped <= 8) {
#if 0
		/* see the matching FIXME in ccio_alloc_range() */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if (pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if (pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}

/****************************************************************
**
**          CCIO dma_ops
**
*****************************************************************/

typedef unsigned long space_t;
#define KERNEL_SPACE 0
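
/*
** DMA "hints" deposited into each IO PDIR entry. IOPDIR_VALID must be
** set for the IOA to translate through the entry. HINT_SAFE_DMA selects
** slower but strictly-ordered "safe" DMA; ccio_map_single() also forces
** it for buffers that are not cache-line aligned. HINT_STOP_MOST is
** only needed when EISA devices can sit below the IOA, and
** HINT_PREFETCH allows the IOA to prefetch data for device reads.
*/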
#define IOPDIR_VALID	0x01UL
#define HINT_SAFE_DMA	0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST	0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST	0x00UL	/* only needed for "some EISA devices" */
#endif
#define HINT_UPDATE_ENB	0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH	0x10UL	/* for outbound pages which are not SAFE */
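
/*
** Per-direction hint sets, indexed by enum dma_data_direction. This
** relies on DMA_BIDIRECTIONAL == 0, DMA_TO_DEVICE == 1 and
** DMA_FROM_DEVICE == 2; DMA_NONE intentionally has no entry.
*/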
static u32 hint_lookup[] = {
	[DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[DMA_TO_DEVICE]		= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[DMA_FROM_DEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};
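
/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir entry.
 * @pdir_ptr: A pointer into the I/O Pdir.
 * @sid: The Space Identifier (must be KERNEL_SPACE).
 * @vba: The virtual address of the page to map.
 * @hints: The DMA hint set for this mapping.
 *
 * Translate @vba to its physical address (lpa), obtain the CPU
 * coherence index for the page (lci), and deposit both together with
 * @hints into the two 32-bit halves of the 64-bit pdir entry. The
 * entry is then flushed to memory so a non-coherent IOA sees it.
 */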
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = lpa(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif

	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/*
	** The entry must be flushed to memory: on some platforms
	** (e.g. PCX-U/U+) the IO PDIR is not kept coherent with the
	** CPU caches, so the IOA would otherwise read a stale entry.
	*/
	asm_io_fdc(pdir_ptr);
	asm_io_sync();
}
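
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The number of bytes whose translations must be purged.
 *
 * Purges the range one TLB "chain" at a time; the chain size is fixed
 * by chainid_shift, set up in ccio_ioc_init().
 */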
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while (byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
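
/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The number of bytes to be invalidated.
 *
 * Clear the VALID bit of every pdir entry covering @byte_cnt bytes
 * (rounded up to whole pages), flush the entries to memory, and purge
 * the matching I/O TLB entries.
 */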
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

	while (byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only the VALID bit */

		/* flush the entry to memory for non-coherent IO PDIRs */
		asm_io_fdc(pdir_ptr);

		iovp += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm_io_sync();
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
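
/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The device to verify.
 * @mask: The DMA address mask the device can use.
 *
 * Only 32-bit (or wider) masks are supported; returns 1 on success,
 * 0 otherwise.
 */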
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit or wider address masks */
	return (int)(mask >= 0xffffffffUL);
}
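
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The device doing the DMA.
 * @addr: The kernel virtual address of the buffer.
 * @size: The size of the buffer in bytes.
 * @direction: The direction of the DMA transaction.
 *
 * Allocates a range in the resource map, fills in one pdir entry per
 * page, and returns the resulting bus (I/O virtual) address.
 */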
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	BUG_ON(size == 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: %zu\n",
		__func__, addr, (long)(iovp | offset), size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if ((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while (size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}

static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	return ccio_map_single(dev, page_address(page) + offset, size,
			direction);
}
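
/**
 * ccio_unmap_page - Unmap an address range from the IOMMU.
 * @dev: The device doing the DMA.
 * @iova: The bus address returned by a previous ccio_map_page() call.
 * @size: The size of the mapped range.
 * @direction: The direction of the DMA transaction.
 * @attrs: DMA attributes (unused).
 */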
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN("%s() iovp 0x%lx/0x%zx\n",
		__func__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
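
/**
 * ccio_alloc - Allocate a coherent DMA mapping.
 * @dev: The device doing the DMA.
 * @size: The size of the region.
 * @dma_handle: Where to return the bus address of the mapping.
 * @flag: GFP flags for the page allocation.
 * @attrs: DMA attributes (unused).
 *
 * Returns the zeroed kernel virtual address, or NULL on failure.
 */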
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
		unsigned long attrs)
{
	void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if (!dev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
	}

	return ret;
}
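
/**
 * ccio_free - Free a coherent DMA mapping.
 * @dev: The device doing the DMA.
 * @size: The size of the region.
 * @cpu_addr: The kernel virtual address returned by ccio_alloc().
 * @dma_handle: The bus address of the mapping.
 * @attrs: DMA attributes (unused).
 */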
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	ccio_unmap_page(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, we can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
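
/**
 * ccio_map_sg - Map a scatter/gather list into the IOMMU.
 * @dev: The device doing the DMA.
 * @sglist: The scatter/gather list to be mapped.
 * @nents: The number of entries in the list.
 * @direction: The direction of the DMA transaction.
 * @attrs: DMA attributes (unused).
 *
 * Coalesces adjacent chunks, then programs one pdir entry per page.
 * Returns the number of DMA mappings used, or a negative errno.
 */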
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return -EINVAL;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path for single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				sg_virt(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for (i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space.
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** Without this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces the two-pass
	** algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Second pass: program the I/O Pdir.
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}
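
/**
 * ccio_unmap_sg - Unmap a scatter/gather list from the IOMMU.
 * @dev: The device doing the DMA.
 * @sglist: The scatter/gather list to be unmapped.
 * @nents: The number of entries in the list.
 * @direction: The direction of the DMA transaction.
 * @attrs: DMA attributes (unused).
 */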
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while (nents && sg_dma_len(sglist)) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_page(dev, sg_dma_address(sglist),
				sg_dma_len(sglist), direction, 0);
		++sglist;
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static const struct dma_map_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc =		ccio_alloc,
	.free =			ccio_free,
	.map_page =		ccio_map_page,
	.unmap_page =		ccio_unmap_page,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
	.alloc_pages =		dma_common_alloc_pages,
	.free_pages =		dma_common_free_pages,
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
		unsigned long avg = 0, min, max;
		int j;
#endif

		seq_printf(m, "%s\n", ioc->name);

		seq_printf(m, "Cujo 2.0 bug    : %s\n",
			   (ioc->cujo20_bug ? "yes" : "no"));

		seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
			   total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
		seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
			   total_pages - ioc->used_pages, ioc->used_pages,
			   (int)(ioc->used_pages * 100 / total_pages));
#endif

		seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
			   ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
		min = max = ioc->avg_search[0];
		for (j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if (ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if (ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			   min, avg, max);

		seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msingle_calls, ioc->msingle_pages,
			   (int)((ioc->msingle_pages * 1000) / ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_page for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
			   min, max, (int)((max * 1000) / min));

		seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msg_calls, ioc->msg_pages,
			   (int)((ioc->msg_pages * 1000) / ioc->msg_calls));

		seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
			   ioc->usg_calls, ioc->usg_pages,
			   (int)((ioc->usg_pages * 1000) / ioc->usg_calls));
#endif

		ioc = ioc->next;
	}

	return 0;
}

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
			     ioc->res_size, false);
		seq_putc(m, '\n');
		ioc = ioc->next;
		break; /* XXX - only the first ioc's bitmap is dumped */
	}

	return 0;
}
#endif /* CONFIG_PROC_FS */
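
/**
 * ccio_find_ioc - Find the ioc in the ioc_list.
 * @hw_path: The hardware path of the ioc.
 *
 * Search the ioc_list for an ioc matching the given hardware path.
 */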
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}
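
/**
 * ccio_get_iommu - Find the iommu which controls this device.
 * @dev: The parisc device.
 *
 * Walk up to the HPHW_IOA parent of @dev and return the registered
 * IOMMU for its hardware path, or NULL if there is none.
 */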
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP	0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages. To avoid this happening, we mark these pages
 * as "used", and ensure that nothing will try to allocate from them.
 */
void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}

#if 0
/* GRANT - is this needed for U2 or not? */

/*
** Get the size of the I/O TLB from this I/O Adapter's firmware;
** spa_shift encodes log2 of the TLB size.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else
/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif

static const struct parisc_device_id ccio_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb },		/* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb },	/* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver __refdata = {
	.name = "ccio",
	.id_table = ccio_tbl,
	.probe = ccio_probe,
};
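
/**
 * ccio_ioc_init - Initialize the I/O Controller.
 * @ioc: The I/O Controller.
 *
 * Set up the I/O Page Directory and the resource map, then program
 * the U2/Uturn chip into virtual mode.
 */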
static void __init
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. The next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards (aka PCI OLARD).
	*/
	iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size, so pdir/res_map
	** will also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);	/* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size >> 20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if (NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if (NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for (i = 1 << CCIO_CHAINID_SHIFT; i; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * The casts below only sign-extend the lower 16 bits of the
	 * register; the upper 16 bits of the range registers are
	 * hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled.
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-class), firmware-reported resources
	 * may already be registered, some of them children of ccio, so
	 * "insert" the ccio ranges into the MMIO hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
		       __func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}

static int __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);
	if (unlikely(!name))
		return -ENOMEM;
	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
	return 0;
}

static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) & ~(align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that has already registered its MMIO space.
	 * Use "insert" instead of request_resource().
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	/* didn't fit there; recurse downward through the address space */
	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* first try to grow the region downward */
	start = (res->start - size) & ~(align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* then try to grow it upward */
	start = res->start;
	len = ((size + res->end + align) & ~(align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}
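
/*
 * Dino calls this function. Beware that we may get called on systems
 * which have no IOC (725, B180, C160L, etc.) but do have a Dino, so
 * it's legal to find no parent IOC. Note also that one of the ioc's
 * MMIO resources may still be unassigned at this point.
 */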
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start) >> 16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end) >> 16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start) >> 16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end) >> 16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
		   (res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
		   (res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "Transparent" bus bridges need to register MMIO resources
	 * firmware assigned them, e.g. children of hppb.c (K-class)
	 * registered their resources in the PDC "bus walk" (see
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}
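
/**
 * ccio_probe - Claim and initialize a U2/Uturn chip.
 * @dev: The parisc device that matched ccio_tbl.
 *
 * Map the chip's registers, set up its IO PDIR and resource map,
 * install ccio_ops as the DMA operations, and hang a pci_hba_data
 * off the device so GET_IOC() can find this ioc later.
 */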
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;
	struct pci_hba_data *hba;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return -ENOMEM;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
	       (unsigned long)dev->hpa.start);

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
	if (!ioc->ioc_regs) {
		kfree(ioc);
		return -ENOMEM;
	}
	ccio_ioc_init(ioc);
	if (ccio_init_resources(ioc)) {
		iounmap(ioc->ioc_regs);
		kfree(ioc);
		return -ENOMEM;
	}
	hppa_dma_ops = &ccio_ops;

	/* link the new ioc at the tail of ioc_list only once it is fully
	 * initialized, so a failed probe can't leave a freed entry on
	 * the list */
	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	/* if this fails, no I/O cards will work, so may as well BUG */
	BUG_ON(hba == NULL);

	hba->iommu = ioc;
	dev->dev.platform_data = hba;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		proc_create_single(MODULE_NAME, 0, proc_runway_root,
				   ccio_proc_info);
		proc_create_single(MODULE_NAME"-bitmap", 0, proc_runway_root,
				   ccio_proc_bitmap_info);
	}
#endif
	ioc_count++;
	return 0;
}

/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}