0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056 #include <linux/module.h>
0057 #include <linux/kernel.h>
0058 #include <linux/skbuff.h>
0059 #include <linux/pci.h>
0060 #include <linux/errno.h>
0061 #include <linux/types.h>
0062 #include <linux/string.h>
0063 #include <linux/delay.h>
0064 #include <linux/init.h>
0065 #include <linux/mm.h>
0066 #include <linux/sched.h>
0067 #include <linux/timer.h>
0068 #include <linux/interrupt.h>
0069 #include <linux/dma-mapping.h>
0070 #include <linux/bitmap.h>
0071 #include <linux/slab.h>
0072 #include <asm/io.h>
0073 #include <asm/byteorder.h>
0074 #include <linux/uaccess.h>
0075
0076 #include <linux/atmdev.h>
0077 #include <linux/atm.h>
0078 #include <linux/sonet.h>
0079
0080 #undef USE_SCATTERGATHER
0081 #undef USE_CHECKSUM_HW
0082
0083
0084 #include "he.h"
0085 #include "suni.h"
0086 #include <linux/atm_he.h>
0087
0088 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
0089
0090 #ifdef HE_DEBUG
0091 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
0092 #else
0093 #define HPRINTK(fmt,args...) do { } while (0)
0094 #endif
0095
0096
0097
0098 static int he_open(struct atm_vcc *vcc);
0099 static void he_close(struct atm_vcc *vcc);
0100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
0101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
0102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
0103 static void he_tasklet(unsigned long data);
0104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
0105 static int he_start(struct atm_dev *dev);
0106 static void he_stop(struct he_dev *dev);
0107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
0108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
0109
0110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
0111
0112
0113
0114 static struct he_dev *he_devs;
0115 static bool disable64;
0116 static short nvpibits = -1;
0117 static short nvcibits = -1;
0118 static short rx_skb_reserve = 16;
0119 static bool irq_coalesce = true;
0120 static bool sdh;
0121
0122
0123 static unsigned int readtab[] = {
0124 CS_HIGH | CLK_HIGH,
0125 CS_LOW | CLK_LOW,
0126 CLK_HIGH,
0127 CLK_LOW,
0128 CLK_HIGH,
0129 CLK_LOW,
0130 CLK_HIGH,
0131 CLK_LOW,
0132 CLK_HIGH,
0133 CLK_LOW,
0134 CLK_HIGH,
0135 CLK_LOW,
0136 CLK_HIGH,
0137 CLK_LOW | SI_HIGH,
0138 CLK_HIGH | SI_HIGH,
0139 CLK_LOW | SI_HIGH,
0140 CLK_HIGH | SI_HIGH
0141 };
0142
0143
0144 static unsigned int clocktab[] = {
0145 CLK_LOW,
0146 CLK_HIGH,
0147 CLK_LOW,
0148 CLK_HIGH,
0149 CLK_LOW,
0150 CLK_HIGH,
0151 CLK_LOW,
0152 CLK_HIGH,
0153 CLK_LOW,
0154 CLK_HIGH,
0155 CLK_LOW,
0156 CLK_HIGH,
0157 CLK_LOW,
0158 CLK_HIGH,
0159 CLK_LOW,
0160 CLK_HIGH,
0161 CLK_LOW
0162 };
0163
0164 static const struct atmdev_ops he_ops =
0165 {
0166 .open = he_open,
0167 .close = he_close,
0168 .ioctl = he_ioctl,
0169 .send = he_send,
0170 .phy_put = he_phy_put,
0171 .phy_get = he_phy_get,
0172 .proc_read = he_proc_read,
0173 .owner = THIS_MODULE
0174 };
0175
0176 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
0177 #define he_readl(dev, reg) readl((dev)->membase + (reg))
0178
0179
0180
/*
 * he_writel_internal - write @val to one of the adapter's internal
 * memories (RCM, TCM or the mailbox, selected by @flags) through the
 * CON_DAT/CON_CTL indirect-access register pair.
 *
 * The data word is staged in CON_DAT, then the write command is issued
 * via CON_CTL; the routine spins until the controller clears
 * CON_CTL_BUSY.
 *
 * NOTE(review): no locking here — presumably callers serialize access
 * to the CON_* registers (e.g. under global_lock); confirm.
 */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* read back to flush the posted write */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* busy-wait for completion */
}
0190
0191 #define he_writel_rcm(dev, val, reg) \
0192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
0193
0194 #define he_writel_tcm(dev, val, reg) \
0195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
0196
0197 #define he_writel_mbox(dev, val, reg) \
0198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
0199
/*
 * he_readl_internal - read a word from one of the adapter's internal
 * memories (RCM, TCM or the mailbox, selected by @flags): issue the
 * read command through CON_CTL, spin until CON_CTL_BUSY clears, then
 * fetch the result from CON_DAT.
 *
 * NOTE(review): like he_writel_internal(), assumes callers serialize
 * access to the CON_* registers; confirm.
 */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* busy-wait for completion */
	return he_readl(he_dev, CON_DAT);
}
0207
0208 #define he_readl_rcm(dev, reg) \
0209 he_readl_internal(dev, reg, CON_CTL_RCM)
0210
0211 #define he_readl_tcm(dev, reg) \
0212 he_readl_internal(dev, reg, CON_CTL_TCM)
0213
0214 #define he_readl_mbox(dev, reg) \
0215 he_readl_internal(dev, reg, CON_CTL_MBOX)
0216
0217
0218
0219
0220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
0221
0222
0223
0224 #define he_writel_tsr0(dev, val, cid) \
0225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
0226 #define he_readl_tsr0(dev, cid) \
0227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
0228
0229 #define he_writel_tsr1(dev, val, cid) \
0230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
0231
0232 #define he_writel_tsr2(dev, val, cid) \
0233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
0234
0235 #define he_writel_tsr3(dev, val, cid) \
0236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
0237
0238 #define he_writel_tsr4(dev, val, cid) \
0239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249 #define he_writel_tsr4_upper(dev, val, cid) \
0250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
0251 CON_CTL_TCM \
0252 | CON_BYTE_DISABLE_2 \
0253 | CON_BYTE_DISABLE_1 \
0254 | CON_BYTE_DISABLE_0)
0255
0256 #define he_readl_tsr4(dev, cid) \
0257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
0258
0259 #define he_writel_tsr5(dev, val, cid) \
0260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
0261
0262 #define he_writel_tsr6(dev, val, cid) \
0263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
0264
0265 #define he_writel_tsr7(dev, val, cid) \
0266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
0267
0268
0269 #define he_writel_tsr8(dev, val, cid) \
0270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
0271
0272 #define he_writel_tsr9(dev, val, cid) \
0273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
0274
0275 #define he_writel_tsr10(dev, val, cid) \
0276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
0277
0278 #define he_writel_tsr11(dev, val, cid) \
0279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
0280
0281
0282 #define he_writel_tsr12(dev, val, cid) \
0283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
0284
0285 #define he_writel_tsr13(dev, val, cid) \
0286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
0287
0288
0289 #define he_writel_tsr14(dev, val, cid) \
0290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
0291
0292 #define he_writel_tsr14_upper(dev, val, cid) \
0293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
0294 CON_CTL_TCM \
0295 | CON_BYTE_DISABLE_2 \
0296 | CON_BYTE_DISABLE_1 \
0297 | CON_BYTE_DISABLE_0)
0298
0299
0300
0301 #define he_writel_rsr0(dev, val, cid) \
0302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
0303 #define he_readl_rsr0(dev, cid) \
0304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
0305
0306 #define he_writel_rsr1(dev, val, cid) \
0307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
0308
0309 #define he_writel_rsr2(dev, val, cid) \
0310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
0311
0312 #define he_writel_rsr3(dev, val, cid) \
0313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
0314
0315 #define he_writel_rsr4(dev, val, cid) \
0316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
0317
0318 #define he_writel_rsr5(dev, val, cid) \
0319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
0320
0321 #define he_writel_rsr6(dev, val, cid) \
0322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
0323
0324 #define he_writel_rsr7(dev, val, cid) \
0325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
0326
/*
 * __find_vcc - find the open receive vcc for connection id @cid.
 *
 * Splits @cid back into vpi/vci using the device's vcibits (the
 * inverse of he_mkcid()), then scans the global vcc_hash bucket for a
 * vcc on this atm device with matching vpi/vci whose rx traffic class
 * is not ATM_NONE (i.e. one that is open for receive).
 *
 * Returns the matching vcc, or NULL if none is found.
 *
 * NOTE(review): walks vcc_hash without taking a lock here — the
 * double-underscore prefix suggests callers are expected to hold the
 * appropriate lock; confirm.
 */
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
0350
0351 static int he_init_one(struct pci_dev *pci_dev,
0352 const struct pci_device_id *pci_ent)
0353 {
0354 struct atm_dev *atm_dev = NULL;
0355 struct he_dev *he_dev = NULL;
0356 int err = 0;
0357
0358 printk(KERN_INFO "ATM he driver\n");
0359
0360 if (pci_enable_device(pci_dev))
0361 return -EIO;
0362 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
0363 printk(KERN_WARNING "he: no suitable dma available\n");
0364 err = -EIO;
0365 goto init_one_failure;
0366 }
0367
0368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
0369 if (!atm_dev) {
0370 err = -ENODEV;
0371 goto init_one_failure;
0372 }
0373 pci_set_drvdata(pci_dev, atm_dev);
0374
0375 he_dev = kzalloc(sizeof(struct he_dev),
0376 GFP_KERNEL);
0377 if (!he_dev) {
0378 err = -ENOMEM;
0379 goto init_one_failure;
0380 }
0381 he_dev->pci_dev = pci_dev;
0382 he_dev->atm_dev = atm_dev;
0383 he_dev->atm_dev->dev_data = he_dev;
0384 atm_dev->dev_data = he_dev;
0385 he_dev->number = atm_dev->number;
0386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
0387 spin_lock_init(&he_dev->global_lock);
0388
0389 if (he_start(atm_dev)) {
0390 he_stop(he_dev);
0391 err = -ENODEV;
0392 goto init_one_failure;
0393 }
0394 he_dev->next = NULL;
0395 if (he_devs)
0396 he_dev->next = he_devs;
0397 he_devs = he_dev;
0398 return 0;
0399
0400 init_one_failure:
0401 if (atm_dev)
0402 atm_dev_deregister(atm_dev);
0403 kfree(he_dev);
0404 pci_disable_device(pci_dev);
0405 return err;
0406 }
0407
/*
 * he_remove_one - PCI remove entry point; tears down in reverse order
 * of he_init_one(): stop the hardware, deregister the atm device, free
 * the per-device state, disable the PCI device.
 *
 * NOTE(review): the device is not unlinked from the global he_devs
 * list here — confirm whether that is handled elsewhere or is a
 * known limitation.
 */
static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_disable_device(pci_dev);
}
0424
0425
/*
 * rate_to_atmf - convert a rate in cells/sec to the 15-bit ATM Forum
 * floating-point rate format: a non-zero flag (bit 14), a 5-bit
 * exponent and a 9-bit mantissa.  A rate of 0 maps to 0.
 */
static unsigned
rate_to_atmf(unsigned rate)
{
#define NONZERO (1 << 14)

	unsigned exponent;
	unsigned mantissa;

	if (!rate)
		return 0;

	/* scale by 2^9, then normalize until the value fits in 10 bits */
	mantissa = rate << 9;
	for (exponent = 0; mantissa > 0x3ff; ++exponent)
		mantissa >>= 1;

	return NONZERO | (exponent << 9) | (mantissa & 0x1ff);
}
0444
/*
 * he_init_rx_lbfp0 - build receive local-buffer free pool 0.
 *
 * Walks the local-buffer descriptor table in RCM (base address read
 * from RCMLBM_BA) and fills in one descriptor per buffer: the buffer's
 * address in adapter SDRAM (in 32-byte units) and a link to the next
 * descriptor index.  Buffers are laid out row by row starting at
 * r0_startrow.  Pool 0 uses the even descriptor indices (index step
 * 2); each index corresponds to 2 RCM words, hence lbm_offset += 4 per
 * buffer.  Finishes by programming the pool head, tail and count
 * registers (RLBF0_H/T/C).
 */
static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;	/* pool 0 owns the even indices */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);	/* link to next */

		if (++lbuf_count == lbufs_per_row) {
			/* row full — advance to the next SDRAM row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);	/* last valid index */
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
0474
/*
 * he_init_rx_lbfp1 - build receive local-buffer free pool 1.
 *
 * Mirror image of he_init_rx_lbfp0(), but pool 1 owns the odd
 * descriptor indices (starts at index 1, step 2) and its buffers live
 * in the rows starting at r1_startrow.  Programs RLBF1_H/T/C when
 * done.
 */
static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;	/* pool 1 owns the odd indices */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);	/* link to next */

		if (++lbuf_count == lbufs_per_row) {
			/* row full — advance to the next SDRAM row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);	/* last valid index */
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
0504
/*
 * he_init_tx_lbfp - build the transmit local-buffer free pool.
 *
 * Transmit descriptors follow both receive pools in the descriptor
 * table (first index = r0_numbuffs + r1_numbuffs) and are consecutive
 * (index step 1, 2 RCM words each — hence lbm_offset += 2).  Buffers
 * are laid out row by row starting at tx_startrow.  Programs TLBF_H/T;
 * unlike the rx pools there is no count register.
 */
static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;	/* tx indices are consecutive */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);	/* link to next */

		if (++lbuf_count == lbufs_per_row) {
			/* row full — advance to the next SDRAM row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);	/* last valid index */
}
0533
/*
 * he_init_tpdrq - allocate the transmit packet descriptor ready queue
 * in coherent DMA memory, initialize the driver's head/tail pointers,
 * and program the adapter's queue base/tail/size registers.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						&he_dev->tpdrq_phys,
						GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}

	/* queue starts empty: head == tail == base */
	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
0554
/*
 * he_init_cs_block - initialize the cell scheduler (CS) mailbox block.
 *
 * Clears the scheduler start-time registers, programs the 16 traffic
 * grid period registers from the link rate, then loads a large set of
 * rate-control registers with fixed values that differ for 622 Mb/s
 * vs. 155 Mb/s boards.
 *
 * NOTE(review): the hex constants below are opaque vendor/init-sequence
 * magic; they are taken on faith and should only be changed against
 * the hardware documentation.
 */
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* clear the scheduler start-time registers */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* program the traffic grid periods: 16 rates stepping down from
	 * the link rate in increments of (rate / 16) / 2 */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* period = core clocks per cell at this rate */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* 622 Mb/s: ER thresholds */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* ER control/status */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* ABR/CBR rate parameters */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* out-of-rate timer periods */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* 155 Mb/s: ER thresholds */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* ER control/status */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* ABR/CBR rate parameters */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* out-of-rate timer periods */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	/* clear the high-priority grid round-trip registers */
	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
0649
/*
 * he_init_cs_block_rcm - initialize the rate-control tables held in
 * RCM: the ABR parameter tables and the atm-forum-rate to rate-group
 * translation table.
 *
 * A 16x16 "rate grid" is built in host memory: row 0 descends linearly
 * from the link rate in steps of rate/32; each subsequent row is the
 * previous row halved (quartered for rows above 14).  Every 10-bit
 * atm-forum rate code is then decoded to cells/sec, matched against
 * the grid (the matching flat index encodes the rate group), paired
 * with a buffer-limit heuristic, and two 16-bit {group,buf} entries
 * are packed per RCM word into the table at RTGTBL_OFFSET.
 *
 * Returns 0 on success or -ENOMEM if the temporary grid cannot be
 * allocated.
 *
 * NOTE(review): the clear loops use exclusive bounds 0xff and 0x1ff,
 * so the last entry of each 0x100-entry region is not zeroed —
 * confirm against the hardware documentation whether this is intended.
 */
static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	/* temporary host-side copy of the 16x16 rate grid */
	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* clear the first ABR table region */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* clear the second ABR table region */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* build the rate grid: row 0 steps down linearly from the link
	 * rate */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	/* each following row halves the one above (rows 15 quarter it) */
	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/* for every 10-bit atm-forum rate code, find the closest grid
	 * entry and a buffer limit, and write the packed result */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;	/* 5-bit mantissa, scaled */
		exp = rate_atmf >> 5;		/* 5-bit exponent */

		/* decode the forum float to cells/sec:
		 * rate = 2^exp * (1 + man/512) */
		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* clamp to a minimum rate */

		/* scan from the slowest grid entry up for the first one
		 * at least as fast as rate_cps */
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* buffer-limit heuristic, scaled by link rate multiple
		 * of OC3 */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272ULL * mult))
			buf = 4;
		else if (rate_cps > (204ULL * mult))
			buf = 3;
		else if (rate_cps > (136ULL * mult))
			buf = 2;
		else if (rate_cps > (68ULL * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		/* pack two 16-bit {group index, buf} entries per word:
		 * the previous (even) entry sits in the high half */
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		/* write one word per pair of rate codes */
		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
0769
/*
 * he_init_group - set up the receive/transmit queues for one group.
 *
 * Disables the small receive buffer pool (RBPS) for this group, then
 * allocates, in order: the rbpl allocation bitmap, the index-to-buffer
 * lookup table, a DMA pool of receive buffers, the rbpl descriptor
 * ring (pre-filled with CONFIG_RBPL_SIZE buffers), the receive buffer
 * return queue (rbrq) and the transmit buffer return queue (tbrq),
 * programming the matching group registers for each.
 *
 * Returns 0 on success or -ENOMEM, unwinding everything allocated so
 * far via the goto chain at the bottom.
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* small buffer pool is unused: zero its registers and give it a
	 * minimal threshold/size */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap tracking which rbpl indices are in use */
	he_dev->rbpl_table = bitmap_zalloc(RBPL_TABLE_SIZE, GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}

	/* index -> he_buff pointer lookup table */
	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
					  sizeof(*he_dev->rbpl_virt),
					  GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* DMA pool the receive buffers are carved from (64-byte aligned) */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* the rbpl descriptor ring itself */
	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
					       &he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* pre-fill every ring slot with a buffer from the pool */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;	/* next free-index search hint */
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		/* hardware gets the address of the data area, not the header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	/* program the large buffer pool registers for this group */
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* receive buffer return queue */

	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
					       &he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* transmit buffer return queue */

	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
					       &he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

	/* error unwind: release in reverse order of acquisition */
out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	bitmap_free(he_dev->rbpl_table);

	return -ENOMEM;
}
0906
/*
 * he_init_irq - set up the interrupt queue and install the handler.
 *
 * Allocates the interrupt status-word queue (one extra entry serves as
 * the host-visible tail offset word), marks every slot invalid,
 * programs interrupt group 0 and disables groups 1-3, zeroes the
 * group-to-queue maps, and finally requests the (shared) PCI irq line.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 *
 * NOTE(review): on request_irq() failure the irq queue allocated above
 * is not freed here — presumably the caller's he_stop() path releases
 * it; confirm.
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* queue of CONFIG_IRQ_SIZE entries plus one trailing word used
	 * as the tail offset */
	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
					      &he_dev->irq_phys, GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale entries are never processed */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	/* program interrupt group 0 */
	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		  IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
		  IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt groups 1-3 are unused: zero them out */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* route all connection groups to interrupt queue 0 */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
0969
0970 static int he_start(struct atm_dev *dev)
0971 {
0972 struct he_dev *he_dev;
0973 struct pci_dev *pci_dev;
0974 unsigned long membase;
0975
0976 u16 command;
0977 u32 gen_cntl_0, host_cntl, lb_swap;
0978 u8 cache_size, timer;
0979
0980 unsigned err;
0981 unsigned int status, reg;
0982 int i, group;
0983
0984 he_dev = HE_DEV(dev);
0985 pci_dev = he_dev->pci_dev;
0986
0987 membase = pci_resource_start(pci_dev, 0);
0988 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
0989
0990
0991
0992
0993
0994
0995 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
0996 hprintk("can't read GEN_CNTL_0\n");
0997 return -EINVAL;
0998 }
0999 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1000 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1001 hprintk("can't write GEN_CNTL_0.\n");
1002 return -EINVAL;
1003 }
1004
1005 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1006 hprintk("can't read PCI_COMMAND.\n");
1007 return -EINVAL;
1008 }
1009
1010 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1011 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1012 hprintk("can't enable memory.\n");
1013 return -EINVAL;
1014 }
1015
1016 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1017 hprintk("can't read cache line size?\n");
1018 return -EINVAL;
1019 }
1020
1021 if (cache_size < 16) {
1022 cache_size = 16;
1023 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1024 hprintk("can't set cache line size to %d\n", cache_size);
1025 }
1026
1027 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1028 hprintk("can't read latency timer?\n");
1029 return -EINVAL;
1030 }
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040 #define LAT_TIMER 209
1041 if (timer < LAT_TIMER) {
1042 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1043 timer = LAT_TIMER;
1044 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1045 hprintk("can't set latency timer to %d\n", timer);
1046 }
1047
1048 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1049 hprintk("can't set up page mapping\n");
1050 return -EINVAL;
1051 }
1052
1053
1054 he_writel(he_dev, 0x0, RESET_CNTL);
1055 he_writel(he_dev, 0xff, RESET_CNTL);
1056
1057 msleep(16);
1058 status = he_readl(he_dev, RESET_CNTL);
1059 if ((status & BOARD_RST_STATUS) == 0) {
1060 hprintk("reset failed\n");
1061 return -EINVAL;
1062 }
1063
1064
1065 host_cntl = he_readl(he_dev, HOST_CNTL);
1066 if (host_cntl & PCI_BUS_SIZE64)
1067 gen_cntl_0 |= ENBL_64;
1068 else
1069 gen_cntl_0 &= ~ENBL_64;
1070
1071 if (disable64 == 1) {
1072 hprintk("disabling 64-bit pci bus transfers\n");
1073 gen_cntl_0 &= ~ENBL_64;
1074 }
1075
1076 if (gen_cntl_0 & ENBL_64)
1077 hprintk("64-bit transfers enabled\n");
1078
1079 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1080
1081
1082 for (i = 0; i < PROD_ID_LEN; ++i)
1083 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1084
1085 he_dev->media = read_prom_byte(he_dev, MEDIA);
1086
1087 for (i = 0; i < 6; ++i)
1088 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1089
1090 hprintk("%s%s, %pM\n", he_dev->prod_id,
1091 he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1092 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1093 ATM_OC12_PCR : ATM_OC3_PCR;
1094
1095
1096 lb_swap = he_readl(he_dev, LB_SWAP);
1097 if (he_is622(he_dev))
1098 lb_swap &= ~XFER_SIZE;
1099 else
1100 lb_swap |= XFER_SIZE;
1101 #ifdef __BIG_ENDIAN
1102 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1103 #else
1104 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1105 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1106 #endif
1107 he_writel(he_dev, lb_swap, LB_SWAP);
1108
1109
1110 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1111
1112
1113 lb_swap |= SWAP_RNUM_MAX(0xf);
1114 he_writel(he_dev, lb_swap, LB_SWAP);
1115
1116
1117 if ((err = he_init_irq(he_dev)) != 0)
1118 return err;
1119
1120
1121 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1122 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1123 he_writel(he_dev, host_cntl, HOST_CNTL);
1124
1125 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1126 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1164 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1165
1166 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1167 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1168 return -ENODEV;
1169 }
1170
1171 if (nvpibits != -1) {
1172 he_dev->vpibits = nvpibits;
1173 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1174 }
1175
1176 if (nvcibits != -1) {
1177 he_dev->vcibits = nvcibits;
1178 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1179 }
1180
1181
1182 if (he_is622(he_dev)) {
1183 he_dev->cells_per_row = 40;
1184 he_dev->bytes_per_row = 2048;
1185 he_dev->r0_numrows = 256;
1186 he_dev->tx_numrows = 512;
1187 he_dev->r1_numrows = 256;
1188 he_dev->r0_startrow = 0;
1189 he_dev->tx_startrow = 256;
1190 he_dev->r1_startrow = 768;
1191 } else {
1192 he_dev->cells_per_row = 20;
1193 he_dev->bytes_per_row = 1024;
1194 he_dev->r0_numrows = 512;
1195 he_dev->tx_numrows = 1018;
1196 he_dev->r1_numrows = 512;
1197 he_dev->r0_startrow = 6;
1198 he_dev->tx_startrow = 518;
1199 he_dev->r1_startrow = 1536;
1200 }
1201
1202 he_dev->cells_per_lbuf = 4;
1203 he_dev->buffer_limit = 4;
1204 he_dev->r0_numbuffs = he_dev->r0_numrows *
1205 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1206 if (he_dev->r0_numbuffs > 2560)
1207 he_dev->r0_numbuffs = 2560;
1208
1209 he_dev->r1_numbuffs = he_dev->r1_numrows *
1210 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1211 if (he_dev->r1_numbuffs > 2560)
1212 he_dev->r1_numbuffs = 2560;
1213
1214 he_dev->tx_numbuffs = he_dev->tx_numrows *
1215 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1216 if (he_dev->tx_numbuffs > 5120)
1217 he_dev->tx_numbuffs = 5120;
1218
1219
1220
1221 he_writel(he_dev,
1222 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1223 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1224 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1225 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1226 LBARB);
1227
1228 he_writel(he_dev, BANK_ON |
1229 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1230 SDRAMCON);
1231
1232 he_writel(he_dev,
1233 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1234 RM_RW_WAIT(1), RCMCONFIG);
1235 he_writel(he_dev,
1236 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1237 TM_RW_WAIT(1), TCMCONFIG);
1238
1239 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1240
1241 he_writel(he_dev,
1242 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1243 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1244 RX_VALVP(he_dev->vpibits) |
1245 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1246
1247 he_writel(he_dev, DRF_THRESH(0x20) |
1248 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1249 TX_VCI_MASK(he_dev->vcibits) |
1250 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1251
1252 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1253
1254 he_writel(he_dev, PHY_INT_ENB |
1255 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1256 RH_CONFIG);
1257
1258
1259
1260 for (i = 0; i < TCM_MEM_SIZE; ++i)
1261 he_writel_tcm(he_dev, 0, i);
1262
1263 for (i = 0; i < RCM_MEM_SIZE; ++i)
1264 he_writel_rcm(he_dev, 0, i);
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1298 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1299 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1300 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1301 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331 he_writel(he_dev, 0x08000, RCMLBM_BA);
1332 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1333 he_writel(he_dev, 0x0d800, RCMABR_BA);
1334
1335
1336
1337 he_init_rx_lbfp0(he_dev);
1338 he_init_rx_lbfp1(he_dev);
1339
1340 he_writel(he_dev, 0x0, RLBC_H);
1341 he_writel(he_dev, 0x0, RLBC_T);
1342 he_writel(he_dev, 0x0, RLBC_H2);
1343
1344 he_writel(he_dev, 512, RXTHRSH);
1345 he_writel(he_dev, 256, LITHRSH);
1346
1347 he_init_tx_lbfp(he_dev);
1348
1349 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1350
1351
1352
1353 if (he_is622(he_dev)) {
1354 he_writel(he_dev, 0x000f, G0_INMQ_S);
1355 he_writel(he_dev, 0x200f, G0_INMQ_L);
1356
1357 he_writel(he_dev, 0x001f, G1_INMQ_S);
1358 he_writel(he_dev, 0x201f, G1_INMQ_L);
1359
1360 he_writel(he_dev, 0x002f, G2_INMQ_S);
1361 he_writel(he_dev, 0x202f, G2_INMQ_L);
1362
1363 he_writel(he_dev, 0x003f, G3_INMQ_S);
1364 he_writel(he_dev, 0x203f, G3_INMQ_L);
1365
1366 he_writel(he_dev, 0x004f, G4_INMQ_S);
1367 he_writel(he_dev, 0x204f, G4_INMQ_L);
1368
1369 he_writel(he_dev, 0x005f, G5_INMQ_S);
1370 he_writel(he_dev, 0x205f, G5_INMQ_L);
1371
1372 he_writel(he_dev, 0x006f, G6_INMQ_S);
1373 he_writel(he_dev, 0x206f, G6_INMQ_L);
1374
1375 he_writel(he_dev, 0x007f, G7_INMQ_S);
1376 he_writel(he_dev, 0x207f, G7_INMQ_L);
1377 } else {
1378 he_writel(he_dev, 0x0000, G0_INMQ_S);
1379 he_writel(he_dev, 0x0008, G0_INMQ_L);
1380
1381 he_writel(he_dev, 0x0001, G1_INMQ_S);
1382 he_writel(he_dev, 0x0009, G1_INMQ_L);
1383
1384 he_writel(he_dev, 0x0002, G2_INMQ_S);
1385 he_writel(he_dev, 0x000a, G2_INMQ_L);
1386
1387 he_writel(he_dev, 0x0003, G3_INMQ_S);
1388 he_writel(he_dev, 0x000b, G3_INMQ_L);
1389
1390 he_writel(he_dev, 0x0004, G4_INMQ_S);
1391 he_writel(he_dev, 0x000c, G4_INMQ_L);
1392
1393 he_writel(he_dev, 0x0005, G5_INMQ_S);
1394 he_writel(he_dev, 0x000d, G5_INMQ_L);
1395
1396 he_writel(he_dev, 0x0006, G6_INMQ_S);
1397 he_writel(he_dev, 0x000e, G6_INMQ_L);
1398
1399 he_writel(he_dev, 0x0007, G7_INMQ_S);
1400 he_writel(he_dev, 0x000f, G7_INMQ_L);
1401 }
1402
1403
1404
1405 he_writel(he_dev, 0x0, MCC);
1406 he_writel(he_dev, 0x0, OEC);
1407 he_writel(he_dev, 0x0, DCC);
1408 he_writel(he_dev, 0x0, CEC);
1409
1410
1411
1412 he_init_cs_block(he_dev);
1413
1414
1415
1416 if (he_init_cs_block_rcm(he_dev) < 0)
1417 return -ENOMEM;
1418
1419
1420
1421 he_init_tpdrq(he_dev);
1422
1423 he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1424 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1425 if (he_dev->tpd_pool == NULL) {
1426 hprintk("unable to create tpd dma_pool\n");
1427 return -ENOMEM;
1428 }
1429
1430 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1431
1432 if (he_init_group(he_dev, 0) != 0)
1433 return -ENOMEM;
1434
1435 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1436 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1437 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1438 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1439 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1440 G0_RBPS_BS + (group * 32));
1441
1442 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1443 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1444 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1445 G0_RBPL_QI + (group * 32));
1446 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1447
1448 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1449 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1450 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1451 G0_RBRQ_Q + (group * 16));
1452 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1453
1454 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1455 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1456 he_writel(he_dev, TBRQ_THRESH(0x1),
1457 G0_TBRQ_THRESH + (group * 16));
1458 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1459 }
1460
1461
1462
1463 he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
1464 sizeof(struct he_hsp),
1465 &he_dev->hsp_phys, GFP_KERNEL);
1466 if (he_dev->hsp == NULL) {
1467 hprintk("failed to allocate host status page\n");
1468 return -ENOMEM;
1469 }
1470 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1471
1472
1473
1474 #ifdef CONFIG_ATM_HE_USE_SUNI
1475 if (he_isMM(he_dev))
1476 suni_init(he_dev->atm_dev);
1477 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1478 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1479 #endif
1480
1481 if (sdh) {
1482
1483 int val;
1484
1485 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1486 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1487 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1488 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1489 }
1490
1491
1492
1493 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1494 reg |= TX_ENABLE|ER_ENABLE;
1495 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1496
1497 reg = he_readl(he_dev, RC_CONFIG);
1498 reg |= RX_ENABLE;
1499 he_writel(he_dev, reg, RC_CONFIG);
1500
1501 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1502 he_dev->cs_stper[i].inuse = 0;
1503 he_dev->cs_stper[i].pcr = -1;
1504 }
1505 he_dev->total_bw = 0;
1506
1507
1508
1509
1510 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1511 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1512
1513 he_dev->irq_peak = 0;
1514 he_dev->rbrq_peak = 0;
1515 he_dev->rbpl_peak = 0;
1516 he_dev->tbrq_peak = 0;
1517
1518 HPRINTK("hell bent for leather!\n");
1519
1520 return 0;
1521 }
1522
1523 static void
1524 he_stop(struct he_dev *he_dev)
1525 {
1526 struct he_buff *heb, *next;
1527 struct pci_dev *pci_dev;
1528 u32 gen_cntl_0, reg;
1529 u16 command;
1530
1531 pci_dev = he_dev->pci_dev;
1532
1533
1534
1535 if (he_dev->membase) {
1536 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1537 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1538 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1539
1540 tasklet_disable(&he_dev->tasklet);
1541
1542
1543
1544 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1545 reg &= ~(TX_ENABLE|ER_ENABLE);
1546 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1547
1548 reg = he_readl(he_dev, RC_CONFIG);
1549 reg &= ~(RX_ENABLE);
1550 he_writel(he_dev, reg, RC_CONFIG);
1551 }
1552
1553 #ifdef CONFIG_ATM_HE_USE_SUNI
1554 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1555 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1556 #endif
1557
1558 if (he_dev->irq)
1559 free_irq(he_dev->irq, he_dev);
1560
1561 if (he_dev->irq_base)
1562 dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1563 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1564
1565 if (he_dev->hsp)
1566 dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1567 he_dev->hsp, he_dev->hsp_phys);
1568
1569 if (he_dev->rbpl_base) {
1570 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1571 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1572
1573 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1574 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1575 }
1576
1577 kfree(he_dev->rbpl_virt);
1578 bitmap_free(he_dev->rbpl_table);
1579 dma_pool_destroy(he_dev->rbpl_pool);
1580
1581 if (he_dev->rbrq_base)
1582 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1583 he_dev->rbrq_base, he_dev->rbrq_phys);
1584
1585 if (he_dev->tbrq_base)
1586 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1587 he_dev->tbrq_base, he_dev->tbrq_phys);
1588
1589 if (he_dev->tpdrq_base)
1590 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1591 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1592
1593 dma_pool_destroy(he_dev->tpd_pool);
1594
1595 if (he_dev->pci_dev) {
1596 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1597 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1598 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1599 }
1600
1601 if (he_dev->membase)
1602 iounmap(he_dev->membase);
1603 }
1604
1605 static struct he_tpd *
1606 __alloc_tpd(struct he_dev *he_dev)
1607 {
1608 struct he_tpd *tpd;
1609 dma_addr_t mapping;
1610
1611 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1612 if (tpd == NULL)
1613 return NULL;
1614
1615 tpd->status = TPD_ADDR(mapping);
1616 tpd->reserved = 0;
1617 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1618 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1619 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1620
1621 return tpd;
1622 }
1623
/*
 * AAL5_LEN - extract the 16-bit PDU length field from an AAL5 trailer.
 * The length occupies the two bytes at offsets len-6 and len-5 of the
 * reassembled buffer (big-endian).
 */
#define AAL5_LEN(buf,len) \
			((((unsigned char *)(buf))[(len)-6] << 8) | \
				(((unsigned char *)(buf))[(len)-5]))

/*
 * TCP_CKSUM - extract the hardware-appended checksum from the last two
 * bytes of the received buffer (big-endian).  Only used when
 * USE_CHECKSUM_HW is defined.
 *
 * Fix: the final index is now written ((len)-1) so the macro argument
 * is parenthesized before the operator, consistent with the other
 * subscripts (macro hygiene; the old [(len-1)] relied on the caller's
 * expression having no stray precedence issues).
 */
#define TCP_CKSUM(buf,len) \
			((((unsigned char *)(buf))[(len)-2] << 8) | \
				(((unsigned char *)(buf))[(len)-1]))
1637
1638 static int
1639 he_service_rbrq(struct he_dev *he_dev, int group)
1640 {
1641 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1642 ((unsigned long)he_dev->rbrq_base |
1643 he_dev->hsp->group[group].rbrq_tail);
1644 unsigned cid, lastcid = -1;
1645 struct sk_buff *skb;
1646 struct atm_vcc *vcc = NULL;
1647 struct he_vcc *he_vcc;
1648 struct he_buff *heb, *next;
1649 int i;
1650 int pdus_assembled = 0;
1651 int updated = 0;
1652
1653 read_lock(&vcc_sklist_lock);
1654 while (he_dev->rbrq_head != rbrq_tail) {
1655 ++updated;
1656
1657 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1658 he_dev->rbrq_head, group,
1659 RBRQ_ADDR(he_dev->rbrq_head),
1660 RBRQ_BUFLEN(he_dev->rbrq_head),
1661 RBRQ_CID(he_dev->rbrq_head),
1662 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1663 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1664 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1665 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1666 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1667 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1668
1669 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1670 heb = he_dev->rbpl_virt[i];
1671
1672 cid = RBRQ_CID(he_dev->rbrq_head);
1673 if (cid != lastcid)
1674 vcc = __find_vcc(he_dev, cid);
1675 lastcid = cid;
1676
1677 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1678 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1679 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1680 clear_bit(i, he_dev->rbpl_table);
1681 list_del(&heb->entry);
1682 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1683 }
1684
1685 goto next_rbrq_entry;
1686 }
1687
1688 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1689 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1690 atomic_inc(&vcc->stats->rx_drop);
1691 goto return_host_buffers;
1692 }
1693
1694 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1695 clear_bit(i, he_dev->rbpl_table);
1696 list_move_tail(&heb->entry, &he_vcc->buffers);
1697 he_vcc->pdu_len += heb->len;
1698
1699 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1700 lastcid = -1;
1701 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1702 wake_up(&he_vcc->rx_waitq);
1703 goto return_host_buffers;
1704 }
1705
1706 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1707 goto next_rbrq_entry;
1708
1709 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1710 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1711 HPRINTK("%s%s (%d.%d)\n",
1712 RBRQ_CRC_ERR(he_dev->rbrq_head)
1713 ? "CRC_ERR " : "",
1714 RBRQ_LEN_ERR(he_dev->rbrq_head)
1715 ? "LEN_ERR" : "",
1716 vcc->vpi, vcc->vci);
1717 atomic_inc(&vcc->stats->rx_err);
1718 goto return_host_buffers;
1719 }
1720
1721 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1722 GFP_ATOMIC);
1723 if (!skb) {
1724 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1725 goto return_host_buffers;
1726 }
1727
1728 if (rx_skb_reserve > 0)
1729 skb_reserve(skb, rx_skb_reserve);
1730
1731 __net_timestamp(skb);
1732
1733 list_for_each_entry(heb, &he_vcc->buffers, entry)
1734 skb_put_data(skb, &heb->data, heb->len);
1735
1736 switch (vcc->qos.aal) {
1737 case ATM_AAL0:
1738
1739 skb->len = ATM_AAL0_SDU;
1740 skb_set_tail_pointer(skb, skb->len);
1741 break;
1742 case ATM_AAL5:
1743
1744
1745 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1746 skb_set_tail_pointer(skb, skb->len);
1747 #ifdef USE_CHECKSUM_HW
1748 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1749 skb->ip_summed = CHECKSUM_COMPLETE;
1750 skb->csum = TCP_CKSUM(skb->data,
1751 he_vcc->pdu_len);
1752 }
1753 #endif
1754 break;
1755 }
1756
1757 #ifdef should_never_happen
1758 if (skb->len > vcc->qos.rxtp.max_sdu)
1759 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1760 #endif
1761
1762 #ifdef notdef
1763 ATM_SKB(skb)->vcc = vcc;
1764 #endif
1765 spin_unlock(&he_dev->global_lock);
1766 vcc->push(vcc, skb);
1767 spin_lock(&he_dev->global_lock);
1768
1769 atomic_inc(&vcc->stats->rx);
1770
1771 return_host_buffers:
1772 ++pdus_assembled;
1773
1774 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1775 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1776 INIT_LIST_HEAD(&he_vcc->buffers);
1777 he_vcc->pdu_len = 0;
1778
1779 next_rbrq_entry:
1780 he_dev->rbrq_head = (struct he_rbrq *)
1781 ((unsigned long) he_dev->rbrq_base |
1782 RBRQ_MASK(he_dev->rbrq_head + 1));
1783
1784 }
1785 read_unlock(&vcc_sklist_lock);
1786
1787 if (updated) {
1788 if (updated > he_dev->rbrq_peak)
1789 he_dev->rbrq_peak = updated;
1790
1791 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1792 G0_RBRQ_H + (group * 16));
1793 }
1794
1795 return pdus_assembled;
1796 }
1797
1798 static void
1799 he_service_tbrq(struct he_dev *he_dev, int group)
1800 {
1801 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1802 ((unsigned long)he_dev->tbrq_base |
1803 he_dev->hsp->group[group].tbrq_tail);
1804 struct he_tpd *tpd;
1805 int slot, updated = 0;
1806 struct he_tpd *__tpd;
1807
1808
1809
1810 while (he_dev->tbrq_head != tbrq_tail) {
1811 ++updated;
1812
1813 HPRINTK("tbrq%d 0x%x%s%s\n",
1814 group,
1815 TBRQ_TPD(he_dev->tbrq_head),
1816 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1817 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1818 tpd = NULL;
1819 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1820 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1821 tpd = __tpd;
1822 list_del(&__tpd->entry);
1823 break;
1824 }
1825 }
1826
1827 if (tpd == NULL) {
1828 hprintk("unable to locate tpd for dma buffer %x\n",
1829 TBRQ_TPD(he_dev->tbrq_head));
1830 goto next_tbrq_entry;
1831 }
1832
1833 if (TBRQ_EOS(he_dev->tbrq_head)) {
1834 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1835 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1836 if (tpd->vcc)
1837 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1838
1839 goto next_tbrq_entry;
1840 }
1841
1842 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1843 if (tpd->iovec[slot].addr)
1844 dma_unmap_single(&he_dev->pci_dev->dev,
1845 tpd->iovec[slot].addr,
1846 tpd->iovec[slot].len & TPD_LEN_MASK,
1847 DMA_TO_DEVICE);
1848 if (tpd->iovec[slot].len & TPD_LST)
1849 break;
1850
1851 }
1852
1853 if (tpd->skb) {
1854 if (tpd->vcc && tpd->vcc->pop)
1855 tpd->vcc->pop(tpd->vcc, tpd->skb);
1856 else
1857 dev_kfree_skb_any(tpd->skb);
1858 }
1859
1860 next_tbrq_entry:
1861 if (tpd)
1862 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1863 he_dev->tbrq_head = (struct he_tbrq *)
1864 ((unsigned long) he_dev->tbrq_base |
1865 TBRQ_MASK(he_dev->tbrq_head + 1));
1866 }
1867
1868 if (updated) {
1869 if (updated > he_dev->tbrq_peak)
1870 he_dev->tbrq_peak = updated;
1871
1872 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1873 G0_TBRQ_H + (group * 16));
1874 }
1875 }
1876
1877 static void
1878 he_service_rbpl(struct he_dev *he_dev, int group)
1879 {
1880 struct he_rbp *new_tail;
1881 struct he_rbp *rbpl_head;
1882 struct he_buff *heb;
1883 dma_addr_t mapping;
1884 int i;
1885 int moved = 0;
1886
1887 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1888 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1889
1890 for (;;) {
1891 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1892 RBPL_MASK(he_dev->rbpl_tail+1));
1893
1894
1895 if (new_tail == rbpl_head)
1896 break;
1897
1898 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1899 if (i > (RBPL_TABLE_SIZE - 1)) {
1900 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1901 if (i > (RBPL_TABLE_SIZE - 1))
1902 break;
1903 }
1904 he_dev->rbpl_hint = i + 1;
1905
1906 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1907 if (!heb)
1908 break;
1909 heb->mapping = mapping;
1910 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1911 he_dev->rbpl_virt[i] = heb;
1912 set_bit(i, he_dev->rbpl_table);
1913 new_tail->idx = i << RBP_IDX_OFFSET;
1914 new_tail->phys = mapping + offsetof(struct he_buff, data);
1915
1916 he_dev->rbpl_tail = new_tail;
1917 ++moved;
1918 }
1919
1920 if (moved)
1921 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1922 }
1923
1924 static void
1925 he_tasklet(unsigned long data)
1926 {
1927 unsigned long flags;
1928 struct he_dev *he_dev = (struct he_dev *) data;
1929 int group, type;
1930 int updated = 0;
1931
1932 HPRINTK("tasklet (0x%lx)\n", data);
1933 spin_lock_irqsave(&he_dev->global_lock, flags);
1934
1935 while (he_dev->irq_head != he_dev->irq_tail) {
1936 ++updated;
1937
1938 type = ITYPE_TYPE(he_dev->irq_head->isw);
1939 group = ITYPE_GROUP(he_dev->irq_head->isw);
1940
1941 switch (type) {
1942 case ITYPE_RBRQ_THRESH:
1943 HPRINTK("rbrq%d threshold\n", group);
1944 fallthrough;
1945 case ITYPE_RBRQ_TIMER:
1946 if (he_service_rbrq(he_dev, group))
1947 he_service_rbpl(he_dev, group);
1948 break;
1949 case ITYPE_TBRQ_THRESH:
1950 HPRINTK("tbrq%d threshold\n", group);
1951 fallthrough;
1952 case ITYPE_TPD_COMPLETE:
1953 he_service_tbrq(he_dev, group);
1954 break;
1955 case ITYPE_RBPL_THRESH:
1956 he_service_rbpl(he_dev, group);
1957 break;
1958 case ITYPE_RBPS_THRESH:
1959
1960 break;
1961 case ITYPE_PHY:
1962 HPRINTK("phy interrupt\n");
1963 #ifdef CONFIG_ATM_HE_USE_SUNI
1964 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1965 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1966 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1967 spin_lock_irqsave(&he_dev->global_lock, flags);
1968 #endif
1969 break;
1970 case ITYPE_OTHER:
1971 switch (type|group) {
1972 case ITYPE_PARITY:
1973 hprintk("parity error\n");
1974 break;
1975 case ITYPE_ABORT:
1976 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1977 break;
1978 }
1979 break;
1980 case ITYPE_TYPE(ITYPE_INVALID):
1981
1982
1983 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1984
1985 he_service_rbrq(he_dev, 0);
1986 he_service_rbpl(he_dev, 0);
1987 he_service_tbrq(he_dev, 0);
1988 break;
1989 default:
1990 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1991 }
1992
1993 he_dev->irq_head->isw = ITYPE_INVALID;
1994
1995 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1996 }
1997
1998 if (updated) {
1999 if (updated > he_dev->irq_peak)
2000 he_dev->irq_peak = updated;
2001
2002 he_writel(he_dev,
2003 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2004 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2005 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2006 (void) he_readl(he_dev, INT_FIFO);
2007 }
2008 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2009 }
2010
2011 static irqreturn_t
2012 he_irq_handler(int irq, void *dev_id)
2013 {
2014 unsigned long flags;
2015 struct he_dev *he_dev = (struct he_dev * )dev_id;
2016 int handled = 0;
2017
2018 if (he_dev == NULL)
2019 return IRQ_NONE;
2020
2021 spin_lock_irqsave(&he_dev->global_lock, flags);
2022
2023 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2024 (*he_dev->irq_tailoffset << 2));
2025
2026 if (he_dev->irq_tail == he_dev->irq_head) {
2027 HPRINTK("tailoffset not updated?\n");
2028 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2029 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2030 (void) he_readl(he_dev, INT_FIFO);
2031 }
2032
2033 #ifdef DEBUG
2034 if (he_dev->irq_head == he_dev->irq_tail )
2035 hprintk("spurious (or shared) interrupt?\n");
2036 #endif
2037
2038 if (he_dev->irq_head != he_dev->irq_tail) {
2039 handled = 1;
2040 tasklet_schedule(&he_dev->tasklet);
2041 he_writel(he_dev, INT_CLEAR_A, INT_FIFO);
2042 (void) he_readl(he_dev, INT_FIFO);
2043 }
2044 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2045 return IRQ_RETVAL(handled);
2046
2047 }
2048
2049 static __inline__ void
2050 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2051 {
2052 struct he_tpdrq *new_tail;
2053
2054 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2055 tpd, cid, he_dev->tpdrq_tail);
2056
2057
2058 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2059 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2060
2061
2062
2063
2064
2065
2066
2067
2068 if (new_tail == he_dev->tpdrq_head) {
2069 he_dev->tpdrq_head = (struct he_tpdrq *)
2070 (((unsigned long)he_dev->tpdrq_base) |
2071 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2072
2073 if (new_tail == he_dev->tpdrq_head) {
2074 int slot;
2075
2076 hprintk("tpdrq full (cid 0x%x)\n", cid);
2077
2078
2079
2080
2081
2082
2083 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2084 if (tpd->iovec[slot].addr)
2085 dma_unmap_single(&he_dev->pci_dev->dev,
2086 tpd->iovec[slot].addr,
2087 tpd->iovec[slot].len & TPD_LEN_MASK,
2088 DMA_TO_DEVICE);
2089 }
2090 if (tpd->skb) {
2091 if (tpd->vcc->pop)
2092 tpd->vcc->pop(tpd->vcc, tpd->skb);
2093 else
2094 dev_kfree_skb_any(tpd->skb);
2095 atomic_inc(&tpd->vcc->stats->tx_err);
2096 }
2097 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2098 return;
2099 }
2100 }
2101
2102
2103 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2104 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2105 he_dev->tpdrq_tail->cid = cid;
2106 wmb();
2107
2108 he_dev->tpdrq_tail = new_tail;
2109
2110 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2111 (void) he_readl(he_dev, TPDRQ_T);
2112 }
2113
2114 static int
2115 he_open(struct atm_vcc *vcc)
2116 {
2117 unsigned long flags;
2118 struct he_dev *he_dev = HE_DEV(vcc->dev);
2119 struct he_vcc *he_vcc;
2120 int err = 0;
2121 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2122 short vpi = vcc->vpi;
2123 int vci = vcc->vci;
2124
2125 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2126 return 0;
2127
2128 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2129
2130 set_bit(ATM_VF_ADDR, &vcc->flags);
2131
2132 cid = he_mkcid(he_dev, vpi, vci);
2133
2134 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2135 if (he_vcc == NULL) {
2136 hprintk("unable to allocate he_vcc during open\n");
2137 return -ENOMEM;
2138 }
2139
2140 INIT_LIST_HEAD(&he_vcc->buffers);
2141 he_vcc->pdu_len = 0;
2142 he_vcc->rc_index = -1;
2143
2144 init_waitqueue_head(&he_vcc->rx_waitq);
2145 init_waitqueue_head(&he_vcc->tx_waitq);
2146
2147 vcc->dev_data = he_vcc;
2148
2149 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2150 int pcr_goal;
2151
2152 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2153 if (pcr_goal == 0)
2154 pcr_goal = he_dev->atm_dev->link_rate;
2155 if (pcr_goal < 0)
2156 pcr_goal = -pcr_goal;
2157
2158 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2159
2160 switch (vcc->qos.aal) {
2161 case ATM_AAL5:
2162 tsr0_aal = TSR0_AAL5;
2163 tsr4 = TSR4_AAL5;
2164 break;
2165 case ATM_AAL0:
2166 tsr0_aal = TSR0_AAL0_SDU;
2167 tsr4 = TSR4_AAL0_SDU;
2168 break;
2169 default:
2170 err = -EINVAL;
2171 goto open_failed;
2172 }
2173
2174 spin_lock_irqsave(&he_dev->global_lock, flags);
2175 tsr0 = he_readl_tsr0(he_dev, cid);
2176 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2177
2178 if (TSR0_CONN_STATE(tsr0) != 0) {
2179 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2180 err = -EBUSY;
2181 goto open_failed;
2182 }
2183
2184 switch (vcc->qos.txtp.traffic_class) {
2185 case ATM_UBR:
2186
2187
2188 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2189 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2190 break;
2191
2192 case ATM_CBR:
2193
2194
2195
2196 if ((he_dev->total_bw + pcr_goal)
2197 > (he_dev->atm_dev->link_rate * 9 / 10))
2198 {
2199 err = -EBUSY;
2200 goto open_failed;
2201 }
2202
2203 spin_lock_irqsave(&he_dev->global_lock, flags);
2204
2205
2206 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2207 if (he_dev->cs_stper[reg].inuse == 0 ||
2208 he_dev->cs_stper[reg].pcr == pcr_goal)
2209 break;
2210
2211 if (reg == HE_NUM_CS_STPER) {
2212 err = -EBUSY;
2213 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2214 goto open_failed;
2215 }
2216
2217 he_dev->total_bw += pcr_goal;
2218
2219 he_vcc->rc_index = reg;
2220 ++he_dev->cs_stper[reg].inuse;
2221 he_dev->cs_stper[reg].pcr = pcr_goal;
2222
2223 clock = he_is622(he_dev) ? 66667000 : 50000000;
2224 period = clock / pcr_goal;
2225
2226 HPRINTK("rc_index = %d period = %d\n",
2227 reg, period);
2228
2229 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2230 CS_STPER0 + reg);
2231 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2232
2233 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2234 TSR0_RC_INDEX(reg);
2235
2236 break;
2237 default:
2238 err = -EINVAL;
2239 goto open_failed;
2240 }
2241
2242 spin_lock_irqsave(&he_dev->global_lock, flags);
2243
2244 he_writel_tsr0(he_dev, tsr0, cid);
2245 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2246 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2247 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2248 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2249 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2250
2251 he_writel_tsr3(he_dev, 0x0, cid);
2252 he_writel_tsr5(he_dev, 0x0, cid);
2253 he_writel_tsr6(he_dev, 0x0, cid);
2254 he_writel_tsr7(he_dev, 0x0, cid);
2255 he_writel_tsr8(he_dev, 0x0, cid);
2256 he_writel_tsr10(he_dev, 0x0, cid);
2257 he_writel_tsr11(he_dev, 0x0, cid);
2258 he_writel_tsr12(he_dev, 0x0, cid);
2259 he_writel_tsr13(he_dev, 0x0, cid);
2260 he_writel_tsr14(he_dev, 0x0, cid);
2261 (void) he_readl_tsr0(he_dev, cid);
2262 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2263 }
2264
2265 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2266 unsigned aal;
2267
2268 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2269 &HE_VCC(vcc)->rx_waitq);
2270
2271 switch (vcc->qos.aal) {
2272 case ATM_AAL5:
2273 aal = RSR0_AAL5;
2274 break;
2275 case ATM_AAL0:
2276 aal = RSR0_RAWCELL;
2277 break;
2278 default:
2279 err = -EINVAL;
2280 goto open_failed;
2281 }
2282
2283 spin_lock_irqsave(&he_dev->global_lock, flags);
2284
2285 rsr0 = he_readl_rsr0(he_dev, cid);
2286 if (rsr0 & RSR0_OPEN_CONN) {
2287 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2288
2289 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2290 err = -EBUSY;
2291 goto open_failed;
2292 }
2293
2294 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2295 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2296 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2297 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2298
2299 #ifdef USE_CHECKSUM_HW
2300 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2301 rsr0 |= RSR0_TCP_CKSUM;
2302 #endif
2303
2304 he_writel_rsr4(he_dev, rsr4, cid);
2305 he_writel_rsr1(he_dev, rsr1, cid);
2306
2307
2308 he_writel_rsr0(he_dev,
2309 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2310 (void) he_readl_rsr0(he_dev, cid);
2311
2312 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2313 }
2314
2315 open_failed:
2316
2317 if (err) {
2318 kfree(he_vcc);
2319 clear_bit(ATM_VF_ADDR, &vcc->flags);
2320 }
2321 else
2322 set_bit(ATM_VF_READY, &vcc->flags);
2323
2324 return err;
2325 }
2326
2327 static void
2328 he_close(struct atm_vcc *vcc)
2329 {
2330 unsigned long flags;
2331 DECLARE_WAITQUEUE(wait, current);
2332 struct he_dev *he_dev = HE_DEV(vcc->dev);
2333 struct he_tpd *tpd;
2334 unsigned cid;
2335 struct he_vcc *he_vcc = HE_VCC(vcc);
2336 #define MAX_RETRY 30
2337 int retry = 0, sleep = 1, tx_inuse;
2338
2339 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2340
2341 clear_bit(ATM_VF_READY, &vcc->flags);
2342 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2343
2344 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2345 int timeout;
2346
2347 HPRINTK("close rx cid 0x%x\n", cid);
2348
2349
2350
2351
2352
2353 spin_lock_irqsave(&he_dev->global_lock, flags);
2354 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2355 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2356 udelay(250);
2357 }
2358
2359 set_current_state(TASK_UNINTERRUPTIBLE);
2360 add_wait_queue(&he_vcc->rx_waitq, &wait);
2361
2362 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2363 (void) he_readl_rsr0(he_dev, cid);
2364 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2365 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2366
2367 timeout = schedule_timeout(30*HZ);
2368
2369 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2370 set_current_state(TASK_RUNNING);
2371
2372 if (timeout == 0)
2373 hprintk("close rx timeout cid 0x%x\n", cid);
2374
2375 HPRINTK("close rx cid 0x%x complete\n", cid);
2376
2377 }
2378
2379 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2380 volatile unsigned tsr4, tsr0;
2381 int timeout;
2382
2383 HPRINTK("close tx cid 0x%x\n", cid);
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394 while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2395 (retry < MAX_RETRY)) {
2396 msleep(sleep);
2397 if (sleep < 250)
2398 sleep = sleep * 2;
2399
2400 ++retry;
2401 }
2402
2403 if (tx_inuse > 1)
2404 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2405
2406
2407
2408 spin_lock_irqsave(&he_dev->global_lock, flags);
2409 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2410
2411
2412 switch (vcc->qos.txtp.traffic_class) {
2413 case ATM_UBR:
2414 he_writel_tsr1(he_dev,
2415 TSR1_MCR(rate_to_atmf(200000))
2416 | TSR1_PCR(0), cid);
2417 break;
2418 case ATM_CBR:
2419 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2420 break;
2421 }
2422 (void) he_readl_tsr4(he_dev, cid);
2423
2424 tpd = __alloc_tpd(he_dev);
2425 if (tpd == NULL) {
2426 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2427 goto close_tx_incomplete;
2428 }
2429 tpd->status |= TPD_EOS | TPD_INT;
2430 tpd->skb = NULL;
2431 tpd->vcc = vcc;
2432 wmb();
2433
2434 set_current_state(TASK_UNINTERRUPTIBLE);
2435 add_wait_queue(&he_vcc->tx_waitq, &wait);
2436 __enqueue_tpd(he_dev, tpd, cid);
2437 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2438
2439 timeout = schedule_timeout(30*HZ);
2440
2441 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2442 set_current_state(TASK_RUNNING);
2443
2444 spin_lock_irqsave(&he_dev->global_lock, flags);
2445
2446 if (timeout == 0) {
2447 hprintk("close tx timeout cid 0x%x\n", cid);
2448 goto close_tx_incomplete;
2449 }
2450
2451 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2452 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2453 udelay(250);
2454 }
2455
2456 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2457 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2458 udelay(250);
2459 }
2460
2461 close_tx_incomplete:
2462
2463 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2464 int reg = he_vcc->rc_index;
2465
2466 HPRINTK("cs_stper reg = %d\n", reg);
2467
2468 if (he_dev->cs_stper[reg].inuse == 0)
2469 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2470 else
2471 --he_dev->cs_stper[reg].inuse;
2472
2473 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2474 }
2475 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2476
2477 HPRINTK("close tx cid 0x%x complete\n", cid);
2478 }
2479
2480 kfree(he_vcc);
2481
2482 clear_bit(ATM_VF_ADDR, &vcc->flags);
2483 }
2484
2485 static int
2486 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2487 {
2488 unsigned long flags;
2489 struct he_dev *he_dev = HE_DEV(vcc->dev);
2490 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2491 struct he_tpd *tpd;
2492 #ifdef USE_SCATTERGATHER
2493 int i, slot = 0;
2494 #endif
2495
2496 #define HE_TPD_BUFSIZE 0xffff
2497
2498 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2499
2500 if ((skb->len > HE_TPD_BUFSIZE) ||
2501 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2502 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2503 if (vcc->pop)
2504 vcc->pop(vcc, skb);
2505 else
2506 dev_kfree_skb_any(skb);
2507 atomic_inc(&vcc->stats->tx_err);
2508 return -EINVAL;
2509 }
2510
2511 #ifndef USE_SCATTERGATHER
2512 if (skb_shinfo(skb)->nr_frags) {
2513 hprintk("no scatter/gather support\n");
2514 if (vcc->pop)
2515 vcc->pop(vcc, skb);
2516 else
2517 dev_kfree_skb_any(skb);
2518 atomic_inc(&vcc->stats->tx_err);
2519 return -EINVAL;
2520 }
2521 #endif
2522 spin_lock_irqsave(&he_dev->global_lock, flags);
2523
2524 tpd = __alloc_tpd(he_dev);
2525 if (tpd == NULL) {
2526 if (vcc->pop)
2527 vcc->pop(vcc, skb);
2528 else
2529 dev_kfree_skb_any(skb);
2530 atomic_inc(&vcc->stats->tx_err);
2531 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2532 return -ENOMEM;
2533 }
2534
2535 if (vcc->qos.aal == ATM_AAL5)
2536 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2537 else {
2538 char *pti_clp = (void *) (skb->data + 3);
2539 int clp, pti;
2540
2541 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2542 clp = (*pti_clp & ATM_HDR_CLP);
2543 tpd->status |= TPD_CELLTYPE(pti);
2544 if (clp)
2545 tpd->status |= TPD_CLP;
2546
2547 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2548 }
2549
2550 #ifdef USE_SCATTERGATHER
2551 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2552 skb_headlen(skb), DMA_TO_DEVICE);
2553 tpd->iovec[slot].len = skb_headlen(skb);
2554 ++slot;
2555
2556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2557 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2558
2559 if (slot == TPD_MAXIOV) {
2560 tpd->vcc = vcc;
2561 tpd->skb = NULL;
2562
2563 wmb();
2564
2565 __enqueue_tpd(he_dev, tpd, cid);
2566 tpd = __alloc_tpd(he_dev);
2567 if (tpd == NULL) {
2568 if (vcc->pop)
2569 vcc->pop(vcc, skb);
2570 else
2571 dev_kfree_skb_any(skb);
2572 atomic_inc(&vcc->stats->tx_err);
2573 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2574 return -ENOMEM;
2575 }
2576 tpd->status |= TPD_USERCELL;
2577 slot = 0;
2578 }
2579
2580 tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
2581 frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
2582 tpd->iovec[slot].len = skb_frag_size(frag);
2583 ++slot;
2584
2585 }
2586
2587 tpd->iovec[slot - 1].len |= TPD_LST;
2588 #else
2589 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2590 tpd->length0 = skb->len | TPD_LST;
2591 #endif
2592 tpd->status |= TPD_INT;
2593
2594 tpd->vcc = vcc;
2595 tpd->skb = skb;
2596 wmb();
2597 ATM_SKB(skb)->vcc = vcc;
2598
2599 __enqueue_tpd(he_dev, tpd, cid);
2600 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2601
2602 atomic_inc(&vcc->stats->tx);
2603
2604 return 0;
2605 }
2606
2607 static int
2608 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2609 {
2610 unsigned long flags;
2611 struct he_dev *he_dev = HE_DEV(atm_dev);
2612 struct he_ioctl_reg reg;
2613 int err = 0;
2614
2615 switch (cmd) {
2616 case HE_GET_REG:
2617 if (!capable(CAP_NET_ADMIN))
2618 return -EPERM;
2619
2620 if (copy_from_user(®, arg,
2621 sizeof(struct he_ioctl_reg)))
2622 return -EFAULT;
2623
2624 spin_lock_irqsave(&he_dev->global_lock, flags);
2625 switch (reg.type) {
2626 case HE_REGTYPE_PCI:
2627 if (reg.addr >= HE_REGMAP_SIZE) {
2628 err = -EINVAL;
2629 break;
2630 }
2631
2632 reg.val = he_readl(he_dev, reg.addr);
2633 break;
2634 case HE_REGTYPE_RCM:
2635 reg.val =
2636 he_readl_rcm(he_dev, reg.addr);
2637 break;
2638 case HE_REGTYPE_TCM:
2639 reg.val =
2640 he_readl_tcm(he_dev, reg.addr);
2641 break;
2642 case HE_REGTYPE_MBOX:
2643 reg.val =
2644 he_readl_mbox(he_dev, reg.addr);
2645 break;
2646 default:
2647 err = -EINVAL;
2648 break;
2649 }
2650 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2651 if (err == 0)
2652 if (copy_to_user(arg, ®,
2653 sizeof(struct he_ioctl_reg)))
2654 return -EFAULT;
2655 break;
2656 default:
2657 #ifdef CONFIG_ATM_HE_USE_SUNI
2658 if (atm_dev->phy && atm_dev->phy->ioctl)
2659 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2660 #else
2661 err = -EINVAL;
2662 #endif
2663 break;
2664 }
2665
2666 return err;
2667 }
2668
2669 static void
2670 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2671 {
2672 unsigned long flags;
2673 struct he_dev *he_dev = HE_DEV(atm_dev);
2674
2675 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2676
2677 spin_lock_irqsave(&he_dev->global_lock, flags);
2678 he_writel(he_dev, val, FRAMER + (addr*4));
2679 (void) he_readl(he_dev, FRAMER + (addr*4));
2680 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2681 }
2682
2683
2684 static unsigned char
2685 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2686 {
2687 unsigned long flags;
2688 struct he_dev *he_dev = HE_DEV(atm_dev);
2689 unsigned reg;
2690
2691 spin_lock_irqsave(&he_dev->global_lock, flags);
2692 reg = he_readl(he_dev, FRAMER + (addr*4));
2693 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2694
2695 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2696 return reg;
2697 }
2698
/*
 * /proc read handler: emits one line of driver/device statistics per
 * call, selected by *pos.  Returns the number of bytes written into
 * @page, or 0 when all lines have been produced.
 *
 * The "if (!left--)" ladder is order-sensitive: each test both selects
 * the line for the current *pos and advances the countdown, so the
 * statements must stay in exactly this sequence.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* accumulated error-cell counters; NOTE(review): static, so the
	 * totals are shared across all he devices — confirm intended */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	/* read-and-accumulate happens on every call past line 2, even
	 * when the line eventually printed is further down the ladder */
	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	/* one line per CBR rate-controller slot */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
2781
2782
2783
/*
 * Read one byte from the serial EEPROM at @addr by bit-banging the
 * serial interface exposed through the HOST_CNTL register.  The
 * statement order and the EEPROM_DELAY pauses implement the device's
 * clocking protocol and must not be reordered.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;	/* clear the serial-interface control bits */

	/* set bit 11 — presumably the data/write-enable for the serial
	 * interface (it is cleared again below before clocking data in) */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* shift out the READ command sequence from readtab[] */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* shift out the 8-bit address, MSB first: one clocktab[] pair
	 * per bit, with the address bit driven on bit 9 */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* clear bit 11 again before reading */
	he_writel(he_dev, val, HOST_CNTL);

	/* clock the data byte in, MSB first: sample ID_DOUT between the
	 * two edges of each clocktab[] pair */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* raise chip select to end the transaction */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2832
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");

/* module parameters (permissions 0: not visible in sysfs) */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

/* PCI IDs this driver binds to: the FORE ForeRunnerHE adapter */
static const struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

/* registers the driver on load and unregisters on unload */
module_pci_driver(he_driver);