0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0014
0015 #include <linux/module.h>
0016 #include <linux/kernel.h>
0017 #include <linux/slab.h>
0018 #include <linux/sched.h>
0019 #include <linux/types.h>
0020 #include <linux/fcntl.h>
0021 #include <linux/string.h>
0022 #include <linux/errno.h>
0023 #include <linux/init.h>
0024 #include <linux/interrupt.h>
0025 #include <linux/ioport.h>
0026 #include <linux/netdevice.h>
0027 #include <linux/hdlc.h>
0028 #include <linux/pci.h>
0029 #include <linux/dma-mapping.h>
0030 #include <linux/delay.h>
0031 #include <asm/io.h>
0032
0033 #include "wanxl.h"
0034
static const char *version = "wanXL serial card driver version: 0.48";

#define PLX_CTL_RESET	0x40000000	/* adapter reset bit in PLX_CONTROL */

#undef DEBUG_PKT
#undef DEBUG_PCI

/* MAILBOX #1 - PUTS commands (written to PLX_MAILBOX_1, see
 * wanxl_puts_command()).
 */
#define MBX1_CMD_ABORTJ 0x85000000	/* Abort and Jump to firmware */
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP 0x8C000001	/* little-endian Byte Swap Mode */
#else
#define MBX1_CMD_BSWAP 0x8C000000	/* big-endian Byte Swap Mode */
#endif

/* MAILBOX #2 - detected on-board DRAM size (upper 16 bits) */
#define MBX2_MEMSZ_MASK 0xFFFF0000
0052
/* Per-channel state; one instance per serial port on the card. */
struct port {
	struct net_device *dev;
	struct card *card;
	spinlock_t lock;	/* serializes wanxl_xmit() */
	int node;		/* physical port number, 0 - 3 */
	unsigned int clock_type;
	int tx_in, tx_out;	/* TX ring: tx_in = next to reclaim,
				 * tx_out = next free slot */
	struct sk_buff *tx_skbs[TX_BUFFERS];
};
0062
/* Host/firmware shared status block, allocated DMA-coherent in probe;
 * the card reads and writes it directly (see wanxl_pci_init_one()).
 */
struct card_status {
	desc_t rx_descs[RX_QUEUE_LENGTH];
	port_status_t port_status[4];
};
0067
/* Per-adapter state. */
struct card {
	int n_ports;		/* number of successfully registered ports */
	u8 irq;			/* non-zero once request_irq() succeeded */

	u8 __iomem *plx;	/* ioremapped PLX registers */
	struct pci_dev *pdev;	/* for pci_name() and DMA mapping */
	int rx_in;		/* next RX descriptor to service */
	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
	struct card_status *status;	/* shared with firmware */
	dma_addr_t status_address;	/* bus address of *status */
	struct port ports[];	/* 1, 2 or 4 entries, flexible array */
};
0080
0081 static inline struct port *dev_to_port(struct net_device *dev)
0082 {
0083 return (struct port *)dev_to_hdlc(dev)->priv;
0084 }
0085
0086 static inline port_status_t *get_status(struct port *port)
0087 {
0088 return &port->card->status->port_status[port->node];
0089 }
0090
#ifdef DEBUG_PCI
/* Debug wrapper around dma_map_single(): complains if the mapping lands
 * above 4 GB (the hardware-visible addresses here are 32-bit).
 */
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
					      size_t size, int direction)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);

	if (addr + size > 0x100000000LL)
		pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
			pci_name(pdev), (unsigned long long)addr);
	return addr;
}

/* Route all mapping calls through the checking wrapper. */
#undef pci_map_single
#define pci_map_single pci_map_single_debug
#endif
0106
0107
/* Cable and/or personality-module change interrupt service.
 * Decodes the firmware-reported cable status word, logs it, and mirrors
 * the carrier (DCD) state into the net device.
 */
static inline void wanxl_cable_intr(struct port *port)
{
	u32 value = get_status(port)->cable;
	int valid = 1;
	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

	/* Low 3 bits: cable type detected on the connector. */
	switch (value & 0x7) {
	case STATUS_CABLE_V35:
		cable = "V.35";
		break;
	case STATUS_CABLE_X21:
		cable = "X.21";
		break;
	case STATUS_CABLE_V24:
		cable = "V.24";
		break;
	case STATUS_CABLE_EIA530:
		cable = "EIA530";
		break;
	case STATUS_CABLE_NONE:
		cable = "no";
		break;
	default:
		cable = "invalid";
	}

	/* Next field: type of the plug-in personality module. */
	switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
	case STATUS_CABLE_V35:
		pm = "V.35";
		break;
	case STATUS_CABLE_X21:
		pm = "X.21";
		break;
	case STATUS_CABLE_V24:
		pm = "V.24";
		break;
	case STATUS_CABLE_EIA530:
		pm = "EIA530";
		break;
	case STATUS_CABLE_NONE:
		pm = "no personality";
		valid = 0;
		break;
	default:
		pm = "invalid personality";
		valid = 0;
	}

	if (valid) {
		/* Report DSR/DCD only when cable and module types agree. */
		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
				", DSR off";
			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
				", carrier off";
		}
		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
	}
	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
		    pm, dte, cable, dsr, dcd);

	/* Keep netif carrier state in sync with the detected DCD line. */
	if (value & STATUS_CABLE_DCD)
		netif_carrier_on(port->dev);
	else
		netif_carrier_off(port->dev);
}
0173
0174
/* Transmit-complete interrupt service: walk the TX ring from tx_in,
 * reclaim descriptors the card has finished with, update stats, and
 * wake the queue once a FULL (still pending) or EMPTY slot is reached.
 */
static inline void wanxl_tx_intr(struct port *port)
{
	struct net_device *dev = port->dev;

	while (1) {
		desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
		struct sk_buff *skb = port->tx_skbs[port->tx_in];

		switch (desc->stat) {
		case PACKET_FULL:	/* not yet taken by the card */
		case PACKET_EMPTY:	/* ring fully reclaimed */
			netif_wake_queue(dev);
			return;

		case PACKET_UNDERRUN:
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
			break;

		default:		/* transmitted OK */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
		}
		desc->stat = PACKET_EMPTY;	/* free the descriptor */
		dma_unmap_single(&port->card->pdev->dev, desc->address,
				 skb->len, DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
	}
}
0205
0206
0207 static inline void wanxl_rx_intr(struct card *card)
0208 {
0209 desc_t *desc;
0210
0211 while (desc = &card->status->rx_descs[card->rx_in],
0212 desc->stat != PACKET_EMPTY) {
0213 if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) {
0214 pr_crit("%s: received packet for nonexistent port\n",
0215 pci_name(card->pdev));
0216 } else {
0217 struct sk_buff *skb = card->rx_skbs[card->rx_in];
0218 struct port *port = &card->ports[desc->stat &
0219 PACKET_PORT_MASK];
0220 struct net_device *dev = port->dev;
0221
0222 if (!skb) {
0223 dev->stats.rx_dropped++;
0224 } else {
0225 dma_unmap_single(&card->pdev->dev,
0226 desc->address, BUFFER_LENGTH,
0227 DMA_FROM_DEVICE);
0228 skb_put(skb, desc->length);
0229
0230 #ifdef DEBUG_PKT
0231 printk(KERN_DEBUG "%s RX(%i):", dev->name,
0232 skb->len);
0233 debug_frame(skb);
0234 #endif
0235 dev->stats.rx_packets++;
0236 dev->stats.rx_bytes += skb->len;
0237 skb->protocol = hdlc_type_trans(skb, dev);
0238 netif_rx(skb);
0239 skb = NULL;
0240 }
0241
0242 if (!skb) {
0243 skb = dev_alloc_skb(BUFFER_LENGTH);
0244 desc->address = skb ?
0245 dma_map_single(&card->pdev->dev,
0246 skb->data,
0247 BUFFER_LENGTH,
0248 DMA_FROM_DEVICE) : 0;
0249 card->rx_skbs[card->rx_in] = skb;
0250 }
0251 }
0252 desc->stat = PACKET_EMPTY;
0253 card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
0254 }
0255 }
0256
/* Shared interrupt handler: drain the doorbell register until the card
 * has no events pending, acknowledging each batch before servicing it.
 */
static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
	struct card *card = dev_id;
	int i;
	u32 stat;
	int handled = 0;

	while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
		handled = 1;
		/* ack exactly the bits we are about to service */
		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);

		for (i = 0; i < card->n_ports; i++) {
			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
				wanxl_tx_intr(&card->ports[i]);
			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
				wanxl_cable_intr(&card->ports[i]);
		}
		/* RX events share a single doorbell bit for all ports */
		if (stat & (1 << DOORBELL_FROM_CARD_RX))
			wanxl_rx_intr(card);
	}

	return IRQ_RETVAL(handled);
}
0280
/* Queue one frame for transmission: fill the next TX descriptor, ring
 * the card's doorbell, and stop the queue if the ring just became full.
 */
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	desc_t *desc;

	spin_lock(&port->lock);

	desc = &get_status(port)->tx_descs[port->tx_out];
	if (desc->stat != PACKET_EMPTY) {
		/* should never happen - the previous xmit stops the queue
		 * when the next slot is still busy */
#ifdef DEBUG_PKT
		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
		netif_stop_queue(dev);
		spin_unlock(&port->lock);
		return NETDEV_TX_BUSY;	/* ask the stack to requeue */
	}

#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif

	port->tx_skbs[port->tx_out] = skb;
	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
				       skb->len, DMA_TO_DEVICE);
	desc->length = skb->len;
	/* stat must be written last - it hands the descriptor to the card */
	desc->stat = PACKET_FULL;
	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
	       port->card->plx + PLX_DOORBELL_TO_CARD);

	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;

	/* stop the queue early if the next slot is still owned by the card;
	 * wanxl_tx_intr() will wake it */
	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
		netif_stop_queue(dev);
#ifdef DEBUG_PKT
		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
	}

	spin_unlock(&port->lock);
	return NETDEV_TX_OK;
}
0324
0325 static int wanxl_attach(struct net_device *dev, unsigned short encoding,
0326 unsigned short parity)
0327 {
0328 struct port *port = dev_to_port(dev);
0329
0330 if (encoding != ENCODING_NRZ &&
0331 encoding != ENCODING_NRZI)
0332 return -EINVAL;
0333
0334 if (parity != PARITY_NONE &&
0335 parity != PARITY_CRC32_PR1_CCITT &&
0336 parity != PARITY_CRC16_PR1_CCITT &&
0337 parity != PARITY_CRC32_PR0_CCITT &&
0338 parity != PARITY_CRC16_PR0_CCITT)
0339 return -EINVAL;
0340
0341 get_status(port)->encoding = encoding;
0342 get_status(port)->parity = parity;
0343 return 0;
0344 }
0345
0346 static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs)
0347 {
0348 const size_t size = sizeof(sync_serial_settings);
0349 sync_serial_settings line;
0350 struct port *port = dev_to_port(dev);
0351
0352 switch (ifs->type) {
0353 case IF_GET_IFACE:
0354 ifs->type = IF_IFACE_SYNC_SERIAL;
0355 if (ifs->size < size) {
0356 ifs->size = size;
0357 return -ENOBUFS;
0358 }
0359 memset(&line, 0, sizeof(line));
0360 line.clock_type = get_status(port)->clocking;
0361 line.clock_rate = 0;
0362 line.loopback = 0;
0363
0364 if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
0365 return -EFAULT;
0366 return 0;
0367
0368 case IF_IFACE_SYNC_SERIAL:
0369 if (!capable(CAP_NET_ADMIN))
0370 return -EPERM;
0371 if (dev->flags & IFF_UP)
0372 return -EBUSY;
0373
0374 if (copy_from_user(&line, ifs->ifs_ifsu.sync,
0375 size))
0376 return -EFAULT;
0377
0378 if (line.clock_type != CLOCK_EXT &&
0379 line.clock_type != CLOCK_TXFROMRX)
0380 return -EINVAL;
0381
0382 if (line.loopback != 0)
0383 return -EINVAL;
0384
0385 get_status(port)->clocking = line.clock_type;
0386 return 0;
0387
0388 default:
0389 return hdlc_ioctl(dev, ifs);
0390 }
0391 }
0392
/* net_device open: reset the TX ring, ring the OPEN doorbell and wait
 * up to 1 s for the firmware to flag the port open in shared status.
 */
static int wanxl_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
	unsigned long timeout;
	int i;

	if (get_status(port)->open) {
		netdev_err(dev, "port already open\n");
		return -EIO;
	}

	i = hdlc_open(dev);
	if (i)
		return i;

	port->tx_in = port->tx_out = 0;
	for (i = 0; i < TX_BUFFERS; i++)
		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);

	timeout = jiffies + HZ;
	do {
		/* firmware sets ->open when the port is up */
		if (get_status(port)->open) {
			netif_start_queue(dev);
			return 0;
		}
	} while (time_after(timeout, jiffies));

	netdev_err(dev, "unable to open port\n");
	/* ask the card to close the port, in case it partially opened */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
	return -EFAULT;
}
0428
/* net_device stop: ring the CLOSE doorbell, wait up to 1 s for the
 * firmware to acknowledge, then reclaim any in-flight TX buffers.
 */
static int wanxl_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long timeout;
	int i;

	hdlc_close(dev);

	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
	       port->card->plx + PLX_DOORBELL_TO_CARD);

	timeout = jiffies + HZ;
	do {
		if (!get_status(port)->open)
			break;
	} while (time_after(timeout, jiffies));

	if (get_status(port)->open)
		netdev_err(dev, "unable to close port\n");

	netif_stop_queue(dev);

	/* free whatever the card never transmitted */
	for (i = 0; i < TX_BUFFERS; i++) {
		desc_t *desc = &get_status(port)->tx_descs[i];

		if (desc->stat != PACKET_EMPTY) {
			desc->stat = PACKET_EMPTY;
			dma_unmap_single(&port->card->pdev->dev,
					 desc->address, port->tx_skbs[i]->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(port->tx_skbs[i]);
		}
	}
	return 0;
}
0464
0465 static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
0466 {
0467 struct port *port = dev_to_port(dev);
0468
0469 dev->stats.rx_over_errors = get_status(port)->rx_overruns;
0470 dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
0471 dev->stats.rx_errors = dev->stats.rx_over_errors +
0472 dev->stats.rx_frame_errors;
0473 return &dev->stats;
0474 }
0475
0476 static int wanxl_puts_command(struct card *card, u32 cmd)
0477 {
0478 unsigned long timeout = jiffies + 5 * HZ;
0479
0480 writel(cmd, card->plx + PLX_MAILBOX_1);
0481 do {
0482 if (readl(card->plx + PLX_MAILBOX_1) == 0)
0483 return 0;
0484
0485 schedule();
0486 } while (time_after(timeout, jiffies));
0487
0488 return -1;
0489 }
0490
/* Hardware-reset the adapter by pulsing the PLX reset bit. The dummy
 * readl() after each writel() flushes the posted PCI write so the
 * udelay() actually brackets the reset pulse.
 */
static void wanxl_reset(struct card *card)
{
	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

	/* NOTE(review): 0x80 in mailbox 0 appears to re-arm the PUTS
	 * "in progress" marker polled in probe - confirm against the
	 * firmware interface.
	 */
	writel(0x80, card->plx + PLX_MAILBOX_0);
	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL);	/* flush posted write */
	udelay(1);
	writel(old_value, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL);	/* flush posted write */
}
0502
/* Tear down a card. Also used as the error-unwind path from probe, so
 * every step guards against resources that were never set up (n_ports,
 * irq, plx and status may still be zero/NULL).
 */
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
	struct card *card = pci_get_drvdata(pdev);
	int i;

	/* only ports that registered successfully were counted */
	for (i = 0; i < card->n_ports; i++) {
		unregister_hdlc_device(card->ports[i].dev);
		free_netdev(card->ports[i].dev);
	}

	/* irq is non-zero only after request_irq() succeeded */
	if (card->irq)
		free_irq(card->irq, card);

	/* stop the card before freeing DMA buffers it may still access */
	wanxl_reset(card);

	for (i = 0; i < RX_QUEUE_LENGTH; i++)
		if (card->rx_skbs[i]) {
			dma_unmap_single(&card->pdev->dev,
					 card->status->rx_descs[i].address,
					 BUFFER_LENGTH, DMA_FROM_DEVICE);
			dev_kfree_skb(card->rx_skbs[i]);
		}

	if (card->plx)
		iounmap(card->plx);

	if (card->status)
		dma_free_coherent(&pdev->dev, sizeof(struct card_status),
				  card->status, card->status_address);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(card);
}
0538
0539 #include "wanxlfw.inc"
0540
/* net_device callbacks shared by every wanXL port; hdlc_start_xmit
 * routes frames to wanxl_xmit via the hdlc->xmit hook set in probe.
 */
static const struct net_device_ops wanxl_ops = {
	.ndo_open       = wanxl_open,
	.ndo_stop       = wanxl_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_siocwandev = wanxl_ioctl,
	.ndo_get_stats  = wanxl_get_stats,
};
0548
/* Probe one wanXL adapter: set DMA masks, allocate the shared status
 * block, wait out the on-board self test, upload the firmware, then
 * register one HDLC device per port. All error paths unwind through
 * wanxl_pci_remove_one().
 */
static int wanxl_pci_init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct card *card;
	u32 ramsize, stat;
	unsigned long timeout;
	u32 plx_phy;		/* PLX PCI base address */
	u32 mem_phy;		/* on-board memory PCI base address */
	u8 __iomem *mem;	/* on-board memory virtual base address */
	int i, ports;

#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;

	/* Restrict both masks to 28 bits for now so the coherent status
	 * block below lands where the card's CPU can reach it directly;
	 * the masks are widened to 32 bits once it is allocated.
	 * NOTE(review): the 28-bit / 256 MB limit is a firmware-side
	 * constraint not visible in this file - confirm against wanxl.h.
	 */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
		pr_err("No usable DMA configuration\n");
		pci_disable_device(pdev);
		return -EIO;
	}

	i = pci_request_regions(pdev, "wanXL");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	/* port count follows the model number */
	switch (pdev->device) {
	case PCI_DEVICE_ID_SBE_WANXL100:
		ports = 1;
		break;
	case PCI_DEVICE_ID_SBE_WANXL200:
		ports = 2;
		break;
	default:		/* wanXL400 */
		ports = 4;
	}

	card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
	if (!card) {
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}

	pci_set_drvdata(pdev, card);
	card->pdev = pdev;

	/* coherent status block shared with the firmware */
	card->status = dma_alloc_coherent(&pdev->dev,
					  sizeof(struct card_status),
					  &card->status_address, GFP_KERNEL);
	if (!card->status) {
		wanxl_pci_remove_one(pdev);
		return -ENOBUFS;
	}

#ifdef DEBUG_PCI
	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
	       " at 0x%LX\n", pci_name(pdev),
	       (unsigned long long)card->status_address);
#endif

	/* status block is placed - packet buffers may use full 32-bit
	 * DMA addressing from here on */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_err("No usable DMA configuration\n");
		wanxl_pci_remove_one(pdev);
		return -EIO;
	}

	/* set up PLX register mapping (BAR 0) */
	plx_phy = pci_resource_start(pdev, 0);

	card->plx = ioremap(plx_phy, 0x70);
	if (!card->plx) {
		pr_err("ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

#if RESET_WHILE_LOADING
	wanxl_reset(card);
#endif

	/* wait for the on-board PUTS (power-up test software) to finish;
	 * mailbox 0 reads zero when it is done */
	timeout = jiffies + 20 * HZ;
	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
		if (time_before(timeout, jiffies)) {
			pr_warn("%s: timeout waiting for PUTS to complete\n",
				pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		switch (stat & 0xC0) {
		case 0x00:	/* PUTS still running */
		case 0x80:	/* self-test in progress */
			break;

		default:	/* test failure */
			/* NOTE(review): message masks with 0x30 while the
			 * switch tests 0xC0 - looks inconsistent; confirm
			 * against the PUTS status register layout.
			 */
			pr_warn("%s: PUTS test 0x%X failed\n",
				pci_name(pdev), stat & 0x30);
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		schedule();
	}

	/* on-board memory size as detected by PUTS (upper 16 bits) */
	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;

	/* on-board RAM lives behind BAR 2 */
	mem_phy = pci_resource_start(pdev, 2);

	/* make sure the board has room for all TX/RX buffers */
	if (ramsize < BUFFERS_ADDR +
	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
		pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
			pci_name(pdev), ramsize,
			BUFFERS_ADDR +
			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* match the card's byte order to the host's */
	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* pre-fill the RX ring; a failed allocation leaves address = 0
	 * and wanxl_rx_intr() retries the refill later */
	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);

		card->rx_skbs[i] = skb;
		if (skb)
			card->status->rx_descs[i].address =
				dma_map_single(&card->pdev->dev, skb->data,
					       BUFFER_LENGTH, DMA_FROM_DEVICE);
	}

	/* copy the firmware (from wanxlfw.inc) into on-board RAM */
	mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
	if (!mem) {
		pr_err("ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

	for (i = 0; i < sizeof(firmware); i += 4)
		writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);

	/* pass the bus address of each port's status structure, then of
	 * the whole status block, to the firmware's parameter area */
	for (i = 0; i < ports; i++)
		writel(card->status_address +
		       (void *)&card->status->port_status[i] -
		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
	writel(card->status_address, mem + PDM_OFFSET + 20);
	writel(PDM_OFFSET, mem);	/* firmware entry point */
	iounmap(mem);

	/* cleared here; the firmware sets it non-zero once initialized
	 * (polled below) */
	writel(0, card->plx + PLX_MAILBOX_5);

	/* start the firmware */
	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	timeout = jiffies + 5 * HZ;
	do {
		stat = readl(card->plx + PLX_MAILBOX_5);
		if (stat)
			break;
		schedule();
	} while (time_after(timeout, jiffies));

	if (!stat) {
		pr_warn("%s: timeout while initializing card firmware\n",
			pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

#if DETECT_RAM
	ramsize = stat;		/* firmware reports detected RAM size here */
#endif

	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);

	/* allocate the (shared) IRQ */
	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
		pr_warn("%s: could not allocate IRQ%i\n",
			pci_name(pdev), pdev->irq);
		wanxl_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	/* create and register one HDLC device per port */
	for (i = 0; i < ports; i++) {
		hdlc_device *hdlc;
		struct port *port = &card->ports[i];
		struct net_device *dev = alloc_hdlcdev(port);

		if (!dev) {
			pr_err("%s: unable to allocate memory\n",
			       pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENOMEM;
		}

		port->dev = dev;
		hdlc = dev_to_hdlc(dev);
		spin_lock_init(&port->lock);
		dev->tx_queue_len = 50;
		dev->netdev_ops = &wanxl_ops;
		hdlc->attach = wanxl_attach;
		hdlc->xmit = wanxl_xmit;
		port->card = card;
		port->node = i;
		get_status(port)->clocking = CLOCK_EXT;
		if (register_hdlc_device(dev)) {
			pr_err("%s: unable to register hdlc device\n",
			       pci_name(pdev));
			free_netdev(dev);
			wanxl_pci_remove_one(pdev);
			return -ENOBUFS;
		}
		/* count only registered ports so unwind frees exactly them */
		card->n_ports++;
	}

	pr_info("%s: port", pci_name(pdev));
	for (i = 0; i < ports; i++)
		pr_cont("%s #%i: %s",
			i ? "," : "", i, card->ports[i].dev->name);
	pr_cont("\n");

	/* log initial cable state and set carrier for each port */
	for (i = 0; i < ports; i++)
		wanxl_cable_intr(&card->ports[i]);

	return 0;
}
0807
/* PCI IDs: the three SBE wanXL models (1-, 2- and 4-port). */
static const struct pci_device_id wanxl_pci_tbl[] = {
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};
0817
/* PCI driver glue. */
static struct pci_driver wanxl_pci_driver = {
	.name     = "wanXL",
	.id_table = wanxl_pci_tbl,
	.probe    = wanxl_pci_init_one,
	.remove   = wanxl_pci_remove_one,
};
0824
/* Module entry point: register the PCI driver. */
static int __init wanxl_init_module(void)
{
#ifdef MODULE
	/* built-in kernels print the version from probe instead */
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&wanxl_pci_driver);
}
0832
/* Module exit point: unregister the PCI driver (removes all cards). */
static void __exit wanxl_cleanup_module(void)
{
	pci_unregister_driver(&wanxl_pci_driver);
}
0837
/* Module metadata and entry points. */
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);

module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);