/*
 * SBA (System Bus Adapter) I/O MMU manager for PA-RISC.
 *
 * This driver programs the IOC (I/O Controller) found in the Astro,
 * Ike/REO and Pluto bus adapters and manages the I/O Pdir (I/O page
 * directory) that maps bus virtual addresses used by PCI devices to
 * physical memory.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>	/* for proc_mckinley_root */
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/* Compile-time debug switches; all off by default. */
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* Set when the IO-PDIR is not cache coherent and needs FDC flushes. */
static unsigned long ioc_needs_fdc = 0;

/* Global count of IOCs in the system. */
static unsigned int global_ioc_cnt = 0;

/* Set when the Piranha I/O Pdir workaround in sba_alloc_pdir() is in effect. */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

/*
** SBA/IOC register access. Register writes are posted, so writes that
** must reach the hardware before we continue are followed by a read
** back (see the READ_REG(...IOC_PCOM) calls below).
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the SBA
 *
 * Print the distributed and directed MMIO ranges forwarded by an
 * Astro/Ike/Pluto I/O adapter.
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOC
 *
 * Print the size/location of the I/O MMU Pdir.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IO Pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @msg: text to print on the output line
 * @pide: pdir index
 *
 * Print one entry of the IO MMU Pdir in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg,
		rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? " -->" : " ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @msg: text to print on the output line
 *
 * Verify the resource map and pdir state are consistent.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* pdir entries are little endian: the valid bit
			** lives in byte 7; shift it up to bit 31 so it can
			** be compared against the resource map bit. */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER! -- res_map != pdir --
				** Dump rval and matching pdir entries.
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
			nents,
			(unsigned long) sg_dma_address(startsg),
			sg_dma_len(startsg),
			sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVAs).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors; saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

/* Convert a resource map pointer plus bit offset into a pdir index. */
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
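
/*
 * Worked example of the macros above (illustrative only, assuming
 * IOVP_SHIFT == 12, i.e. 4K IO pages):
 *
 *   pide 0x23 returned by sba_alloc_range()
 *     iovp   = pide << IOVP_SHIFT   = 0x23000
 *     offset = vaddr & ~IOVP_MASK   = 0x123      (sub-page offset)
 *     iova   = SBA_IOVA(ioc, iovp, offset, 0)
 *            = 0x23123                            (plus ioc->ibase on ZX1/Pluto)
 *
 *   Unmapping reverses this: SBA_IOVP() strips the ibase bits again and
 *   PDIR_INDEX(iovp) recovers pide 0x23.
 */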

/**
 * sba_search_bitmap - find free space in the IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @dev: device the mapping is for (used for segment boundary limits)
 * @bits_wanted: number of entries we need
 *
 * Find consecutive free bits in the resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on the next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource map on "naturally aligned" boundaries:
		** the starting bit offset is rounded up to a multiple of the
		** (power of two) allocation size o.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}

		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped? start the next search at the beginning of the map */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
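
/*
 * Example of the "naturally aligned" small-allocation search above
 * (illustrative only): a request for 4 bits rounds the starting bit
 * offset up to a multiple of o == 4, so the mask RESMAP_MASK(4) is slid
 * through each word in 4-bit steps. This keeps every allocation aligned
 * to its own (power of two) size, which is what the PCOM purge encoding
 * in sba_mark_invalid() relies on.
 */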

/**
 * sba_alloc_range - find free bits and mark them in the resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @dev: device the mapping is for
 * @size: number of bytes that will need to be mapped
 *
 * Given a size, find consecutive unmarked bits and mark them in the
 * resource bit map. Returns the pdir index of the allocated range.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		/* retry from the start of the map before giving up */
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}

/**
 * sba_free_range - unmark bits in the IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @iova: IO virtual address which was previously allocated
 * @size: number of bytes covered by the mapping
 *
 * Clear the corresponding bits in the ioc's resource map.
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* mask covering bits_not_wanted bits at this pide's position in the word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
** SBA mapping routine
**
** Given a virtual address (vba) and a space id (sid), sba_io_pdir_entry()
** loads the I/O Pdir entry pointed to by pdir_ptr. As can be seen from the
** code below, each 8-byte entry is laid out roughly as (LSB == bit 0):
**
**  63                    40                                 11    7        0
** +-+---------------------+----------------------------------+----+--------+
** |V|        U            |            PPN[43:12]            | U  |   VI   |
** +-+---------------------+----------------------------------+----+--------+
**
**  V   == Valid bit (SBA_PDIR_VALID_BIT)
**  U   == Unused
**  PPN == Physical Page Number (from the LPA instruction)
**  VI  == Virtual/Coherent Index (from the LCI instruction)
**
** The entry is stored little endian (cpu_to_le64) even though the CPU
** is big endian, which is why other code in this file tests byte 7 of
** an entry for the valid bit.
*/
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities have the non-coherent IO-PDIR bit
	 * set, we have to flush/sync every time the IO-PDIR is changed.
	 */
	asm_io_fdc(pdir_ptr);
}
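
/*
 * Illustrative example (not executed): for a kernel buffer whose physical
 * page is 0x0006789000 and whose LCI-derived coherent-index byte is 0xab,
 * the entry written above is
 *
 *   cpu_to_le64(SBA_PDIR_VALID_BIT | 0x0006789000 | 0xab)
 *
 * i.e. valid bit set, PPN in the middle bits, CI in the low byte.
 */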

/**
 * sba_mark_invalid - invalidate one or more IO Pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in
 * @iova: IO virtual address mapped earlier
 * @byte_cnt: number of bytes this mapping covers
 *
 * Mark the IO Pdir entry(ies) as invalid and purge the corresponding
 * IO TLB entries via the PCOM (Purge Command) register. PCOM purges a
 * power-of-two sized, naturally aligned range; the low bits of the
 * address written encode the size of the purge.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert the first pdir entry is still marked valid.
	** The entries are little endian, hence checking byte 7.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			asm_io_fdc(pdir_ptr);
			if (ioc_needs_fdc) {
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** Clear the I/O Pdir entry "valid" bit.
	** We have to R/M/W the cacheline regardless of how much of the
	** pdir entry we clobber.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	asm_io_fdc(pdir_ptr);

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
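
/*
 * PCOM example (illustrative, assuming 4K pages): purging a 16KB mapping
 * at iovp 0x23000 writes 0x23000 | (get_order(16384) + PAGE_SHIFT)
 * == 0x23000 | 14 to IOC_PCOM, i.e. the low bits carry log2 of the purge
 * size while the upper bits give its (size-aligned) address.
 */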

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * Return non-zero if the IOVA space provided by this IOMMU fits
 * within the device's DMA mask.
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/*
	 * Check if the mask covers the current maximum IO virtual address,
	 * i.e. ibase plus the size of the IOVA space described by the pdir.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
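
/*
 * Example (illustrative, assuming ibase == 0 and a 1GB IOVA space):
 * pdir_size/sizeof(u64) * IOVP_SIZE == 0x40000000, so any device mask
 * >= 0x3fffffff is accepted, while a 24-bit ISA-style mask is refused.
 */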

/**
 * sba_map_single - map one buffer and return an IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking
 * @addr: driver buffer to map
 * @size: number of bytes to map in driver buffer
 * @direction: R/W or both
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in sba_io_pdir_entry() to be visible to the IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
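
/*
 * Note: drivers never call sba_map_single() directly; it is reached via
 * the dma_map_ops table registered below (sba_ops), e.g. a driver's
 * dma_map_single(dev, buf, len, DMA_TO_DEVICE) lands here through
 * sba_map_page().
 */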

static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction,
	     unsigned long attrs)
{
	return sba_map_single(dev, page_address(page) + offset, size,
			direction);
}

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking
 * @iova: IOVA of driver buffer previously mapped
 * @size: number of bytes mapped in driver buffer
 * @direction: R/W or both
 * @attrs: DMA attributes
 */
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}
	offset = iova & ~IOVP_MASK;
	iova ^= offset;		/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying the reuse of IO Pdir entries reduces the number of
	** MMIO reads needed to flush writes to the PCOM register.
	*/
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force them to be visible now */
	asm_io_sync();

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);
}


/**
 * sba_alloc - allocate and map shared memory for DMA
 * @hwdev: instance of PCI owned by the driver that's asking
 * @size: number of bytes to allocate
 * @dma_handle: IOVA of the new buffer
 */
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI devices */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free - free and unmap shared memory for DMA
 * @hwdev: instance of PCI owned by the driver that's asking
 * @size: number of bytes mapped in driver buffer
 * @vaddr: virtual address of the buffer
 * @dma_handle: IO virtual address of the buffer
 */
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(hwdev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir index, it can't be used to flag an unused SG
** entry; PIDE_FLAG marks SG entries whose dma_address carries a pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map a Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both
 * @attrs: DMA attributes
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);
	if (!ioc)
		return -EINVAL;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
						sglist->length, direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First pass: coalesce the chunks and allocate I/O pdir space.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Second pass: program the I/O Pdir from the virtual addresses
	** recorded in the scatterlist.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in sba_io_pdir_entry() to be visible to the IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
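
/*
 * Illustrative example of the two-pass algorithm above: a scatterlist of
 * small, virtually contiguous buffers can be coalesced by
 * iommu_coalesce_chunks() into a single IOVA range; iommu_fill_pdir()
 * then writes one pdir entry per IO page, and the caller sees fewer DMA
 * segments than it passed in.
 */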


/**
 * sba_unmap_sg - unmap a Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both
 * @attrs: DMA attributes
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sg_dma_len(sglist)) {

		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
				direction, 0);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static const struct dma_map_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc =		sba_alloc,
	.free =			sba_free,
	.map_page =		sba_map_page,
	.unmap_page =		sba_unmap_page,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
	.alloc_pages =		dma_common_alloc_pages,
	.free_pages =		dma_common_free_pages,
};
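
/*
 * These ops become the global hppa_dma_ops in sba_driver_callback() below,
 * so every dma_map_*() call for a PA-RISC PCI device behind this SBA is
 * routed through the functions above.
 */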


/**************************************************************
*
*   SBA PAT PDC support
*
***************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO: directed-range support would require PAT PDC calls to program
** the SBA/LBA directed range registers; currently unimplemented.
*/
PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL
#define PIRANHA_ADDR_VAL	0x00060000UL
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base) {
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/*
	** Only certain (apparently PA8700/PCX-W2, rev <= 2.2) CPU
	** revisions are affected by the Pdir placement erratum worked
	** around below; everyone else can use the allocation as-is.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	** Workaround: the I/O Pdir must not occupy physical addresses
	** matching PIRANHA_ADDR_MASK/PIRANHA_ADDR_VAL, so reallocate
	** around that region if needed.
	*/
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* grab a naturally aligned 512KB chunk (whose start
			** cannot match the bad pattern) and free the tail.
			*/
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));

			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release pages we won't need */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir: allocate a chunk twice as big and keep
		** the piece starting at an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original pdir */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB of the new chunk */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir: drop the trailing 1MB plus the unusable
			** 128KB, and flag it so the resource map marks the
			** corresponding entries as permanently busy.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* setup_ibase_imask - program LBAs hung off this IOC with ibase/imask */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc = ioc,
		.ioc_num = ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}
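
/*
 * Rope numbering assumption used above: each LBA's fixed HPA encodes its
 * rope number in bits 13..16, and ropes 0-7 belong to IOC 0 while ropes
 * 8-15 belong to IOC 1, hence the ">> 3" comparison.
 */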

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif

	/*
	** On Pluto, firmware has already programmed the base and size of
	** a safe IOVA space in the IBASE and IMASK registers; read it back.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/* convert the IOVA space size into a pdir order and size */
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for the IOC and the LBAs */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O Pdir page size to the system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** (bit zero is the enable bit).
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/* Clear the I/O TLB of any possible stale entries. */
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT
	/*
	** If an AGP (Quicksilver) device is present, reserve half of the
	** IOV space for the AGP GART and leave a cookie in the pdir entry
	** following the PCI half for the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
			__func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /* SBA_AGP_SUPPORT */
}
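
/*
 * Example: on a kernel built with 4K pages (PAGE_SHIFT == 12) the switch
 * above writes TCNFG = 0, telling the IOC that each IO Pdir entry maps a
 * 4K IO page; a 64K-page kernel would write TCNFG = 3.
 */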

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine the IOVA space size from the memory size: size it in
	** proportion to physical memory (split across the IOCs), then
	** clamp it to the 1MB-1GB range below.
	*/
	iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** The IOVA space must be a power of two in size; the pdir and
	** res_map scale accordingly (except where the Piranha workaround
	** trims 128KB off the pdir).
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for the IOC and the LBAs */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	setup_ibase_imask(sba, ioc, ioc_num);

	/* Program the IOC's ibase and enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O Pdir page size to the system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}

	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	/* Clear the I/O TLB of any possible stale entries. */
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0;

	DBG_INIT("%s() DONE\n", __func__);
}
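
/*
 * Sizing example (illustrative): a machine with 1GB of RAM and one IOC
 * has totalram_pages() == 0x40000 (4K pages); the clamp and power-of-two
 * rounding above turn that into a 1GB IOVA space and a 2MB I/O Pdir
 * (0x40000 entries * 8 bytes).
 */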


/**************************************************************
*
*   SBA initialization code (HW and SW)
*
***************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/*
		** On non-PAT boxes whose firmware console is a keyboard
		** device, ask PDC to reset I/O devices before we start
		** reprogramming the SBA.
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}

#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
		pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* Ike/REO: two IOCs per SBA */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			** On Pluto, clear the ROPE(N)_CFG AO bit to
			** disable relaxed ordering on each rope.
			*/
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Enable HardFail so rope errors are fatal rather
			** than silently corrupting data.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
			i,
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This is useful for debugging - especially if we get coredumps.
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of the Piranha workaround */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - "hint" for the search routine */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark the first bit busy - i.e. no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of the Piranha workaround:
		** mark the region of the res_map that corresponds to the
		** unusable 128k of pdir as permanently busy.
		*/
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K - mark busy */
			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities have the non-coherent IO-PDIR bit
	 * set, we have to flush/sync every time the IO-PDIR is changed.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}
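
/*
 * Resource map sizing example (illustrative): a 2MB I/O Pdir holds
 * 0x40000 entries, so res_size == 0x40000 >> 3 == 32KB of bitmap, one bit
 * per IO page; the Piranha workaround removes 128KB of pdir, i.e. 2KB of
 * bitmap, before that conversion.
 */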

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];
	int total_pages = (int) (ioc->res_size << 3);
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
		(int)((ioc->res_size << 3) * sizeof(u64)),
		total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i=0; i<4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* unmap_sg calls unmap_page for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
		min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];

	seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');

	return 0;
}
#endif /* CONFIG_PROC_FS */

static const struct parisc_device_id sba_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver __refdata = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int __init sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[]="Astro ?.?";

		/* Astro reports its revision at offset 0 */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[]="Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA */
		if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for(i=0; i<MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	proc_create_single("sba_iommu", 0, root, sba_proc_info);
	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
#endif
	return 0;
}

/*
** One-time driver registration, called early from the PA-RISC
** initialization code.
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}


/**
 * sba_get_iommu - assign the IOMMU pointer for a PCI bus controller
 * @pci_hba: the parisc device (LBA) asking
 *
 * Return the IOC that serves the rope this PCI host bridge hangs off of.
 */
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* rope # -> IOC # */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}
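
/*
 * Example (illustrative): an LBA at hardware path 10 sits on rope 10,
 * so iocnum == 10 >> 3 == 1 and the second IOC (only present on Ike/REO)
 * is returned; Astro and Pluto only ever populate ioc[0].
 */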


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: the parisc device (LBA) asking
 * @r: resource the PCI host controller wants start/end fields assigned
 *
 * For the given PCI host bridge, determine if any directed LMMIO ranges
 * are routed down its rope and report them in @r.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges */
	for (i=0; i<4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down a different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}


/**
 * sba_distributed_lmmio - return this rope's share of distributed LMMIO
 * @pci_hba: the parisc device (LBA) asking
 * @r: resource the PCI host controller wants start/end fields assigned
 *
 * The distributed LMMIO range is always present; it is split evenly
 * across the ropes, so only the base and size need to be computed.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distributed range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}