0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
/* Banner printed via printk() each time a PLIP device is attached. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087 #include <linux/compat.h>
0088 #include <linux/module.h>
0089 #include <linux/kernel.h>
0090 #include <linux/types.h>
0091 #include <linux/fcntl.h>
0092 #include <linux/interrupt.h>
0093 #include <linux/string.h>
0094 #include <linux/slab.h>
0095 #include <linux/if_ether.h>
0096 #include <linux/in.h>
0097 #include <linux/errno.h>
0098 #include <linux/delay.h>
0099 #include <linux/init.h>
0100 #include <linux/netdevice.h>
0101 #include <linux/etherdevice.h>
0102 #include <linux/inetdevice.h>
0103 #include <linux/skbuff.h>
0104 #include <linux/if_plip.h>
0105 #include <linux/workqueue.h>
0106 #include <linux/spinlock.h>
0107 #include <linux/completion.h>
0108 #include <linux/parport.h>
0109 #include <linux/bitops.h>
0110
0111 #include <net/neighbour.h>
0112
0113 #include <asm/irq.h>
0114 #include <asm/byteorder.h>
0115
0116
/* Maximum number of devices driven by this module. */
#define PLIP_MAX 8

/* Compile-time debug verbosity; values > 1 enable extra printk()s. */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

/* Gate the machine IRQ line; irq == -1 means we run in polled mode. */
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* Busy-wait granularity in microseconds (argument to udelay()). */
#define PLIP_DELAY_UNIT 1

/* Connection-trigger timeout, counted in PLIP_DELAY_UNIT steps. */
#define PLIP_TRIGGER_WAIT 500

/* Per-nibble handshake timeout, counted in PLIP_DELAY_UNIT steps. */
#define PLIP_NIBBLE_WAIT 3000
0136
0137
/* Bottom-half handlers, run from the shared workqueue. */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Interrupt handler (also invoked directly by plip_timer_bh in polled mode). */
static void plip_interrupt(void *dev_id);

/* Entries for the net_device_ops / header_ops tables below. */
static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
			    unsigned short type, const void *daddr,
			    const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			       void __user *data, int cmd);
/* parport sharing callbacks. */
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
0158
/* Overall state of the half-duplex link. */
enum plip_connection_state {
	PLIP_CN_NONE=0,
	PLIP_CN_RECEIVE,
	PLIP_CN_SEND,
	PLIP_CN_CLOSING,
	PLIP_CN_ERROR
};

/* Progress of the packet currently being sent or received. */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

/* Which half of the current byte is on the wire (see plip_send/plip_receive). */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};

/* Per-direction transfer state; one instance each for send and receive. */
struct plip_local {
	enum plip_packet_state state;
	enum plip_nibble_state nibble;
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;		/* packet length, byte- or halfword-addressable */
	unsigned short byte;	/* index of the byte being transferred */
	unsigned char checksum;	/* running 8-bit additive checksum */
	unsigned char data;	/* scratch byte (holds the received checksum) */
	struct sk_buff *skb;	/* packet buffer owned by this transfer */
};

/* Per-device private data (netdev_priv). */
struct net_local {
	struct net_device *dev;
	struct work_struct immediate;	/* runs plip_bh */
	struct delayed_work deferred;	/* runs plip_kick_bh */
	struct delayed_work timer;	/* runs plip_timer_bh (IRQ-less mode) */
	struct plip_local snd_data;
	struct plip_local rcv_data;
	struct pardevice *pardev;
	unsigned long trigger;	/* trigger timeout, tunable via SIOCDEVPLIP */
	unsigned long nibble;	/* nibble timeout, tunable via SIOCDEVPLIP */
	enum plip_connection_state connection;
	unsigned short timeout_count;	/* retries so far on the current transfer */
	int is_deferred;	/* plip_bh rescheduled via the deferred work */
	int port_owner;		/* non-zero while we hold the parport claim */
	int should_relinquish;	/* release the port once the link goes idle */
	spinlock_t lock;
	atomic_t kill_timer;	/* asks plip_timer_bh to stop (IRQ-less mode) */
	struct completion killed_timer_cmp;	/* signalled when it has stopped */
};
0224
0225 static inline void enable_parport_interrupts (struct net_device *dev)
0226 {
0227 if (dev->irq != -1)
0228 {
0229 struct parport *port =
0230 ((struct net_local *)netdev_priv(dev))->pardev->port;
0231 port->ops->enable_irq (port);
0232 }
0233 }
0234
0235 static inline void disable_parport_interrupts (struct net_device *dev)
0236 {
0237 if (dev->irq != -1)
0238 {
0239 struct parport *port =
0240 ((struct net_local *)netdev_priv(dev))->pardev->port;
0241 port->ops->disable_irq (port);
0242 }
0243 }
0244
0245 static inline void write_data (struct net_device *dev, unsigned char data)
0246 {
0247 struct parport *port =
0248 ((struct net_local *)netdev_priv(dev))->pardev->port;
0249
0250 port->ops->write_data (port, data);
0251 }
0252
0253 static inline unsigned char read_status (struct net_device *dev)
0254 {
0255 struct parport *port =
0256 ((struct net_local *)netdev_priv(dev))->pardev->port;
0257
0258 return port->ops->read_status (port);
0259 }
0260
/* Header building: ethernet framing plus PLIP's address rewriting. */
static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache  = plip_hard_header_cache,
};
0265
/* Network-device entry points for a PLIP interface. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_siocdevprivate	 = plip_siocdevprivate,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
/* Entry point of PLIP driver.
 * Probe the hardware (not really), set up the device's defaults and
 * initialize the private data.  Called once per attached port from
 * plip_attach(), before register_netdev(). */
static void
plip_init_netdev(struct net_device *dev)
{
	/* Placeholder MAC; bytes 2-5 are overwritten with the interface's
	   IPv4 address in plip_open(). */
	static const u8 addr_init[ETH_ALEN] = {
		0xfc, 0xfc, 0xfc,
		0xfc, 0xfc, 0xfc,
	};
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	eth_hw_addr_set(dev, addr_init);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;

	/* We do not own the parport until the first open/xmit claims it. */
	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* Polled mode: a self-rearming work item substitutes for the IRQ. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
0317
0318
0319
0320
0321 static void
0322 plip_kick_bh(struct work_struct *work)
0323 {
0324 struct net_local *nl =
0325 container_of(work, struct net_local, deferred.work);
0326
0327 if (nl->is_deferred)
0328 schedule_work(&nl->immediate);
0329 }
0330
0331
/* Connection-state handlers; one per enum plip_connection_state value. */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

/* Handler return codes, consumed by plip_bh()/plip_bh_timeout_error(). */
#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT 3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Indexed by nl->connection (enum plip_connection_state). */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
0363
0364
0365 static void
0366 plip_bh(struct work_struct *work)
0367 {
0368 struct net_local *nl = container_of(work, struct net_local, immediate);
0369 struct plip_local *snd = &nl->snd_data;
0370 struct plip_local *rcv = &nl->rcv_data;
0371 plip_func f;
0372 int r;
0373
0374 nl->is_deferred = 0;
0375 f = connection_state_table[nl->connection];
0376 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
0377 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
0378 nl->is_deferred = 1;
0379 schedule_delayed_work(&nl->deferred, 1);
0380 }
0381 }
0382
0383 static void
0384 plip_timer_bh(struct work_struct *work)
0385 {
0386 struct net_local *nl =
0387 container_of(work, struct net_local, timer.work);
0388
0389 if (!(atomic_read (&nl->kill_timer))) {
0390 plip_interrupt (nl->dev);
0391
0392 schedule_delayed_work(&nl->timer, 1);
0393 }
0394 else {
0395 complete(&nl->killed_timer_cmp);
0396 }
0397 }
0398
/* Handle a TIMEOUT/ERROR/HS_TIMEOUT result from a state handler.
 *
 * Retries quietly while timeout_count is within its budget (10 retries
 * for a handshake timeout, 3 otherwise); after that the transfer is
 * aborted: both in-flight skbs are dropped, interrupts are masked, the
 * queue is stopped and the connection enters PLIP_CN_ERROR, from which
 * plip_error() later tries to reset the interface.
 *
 * Returns TIMEOUT (caller reschedules via the deferred work) or OK.
 */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	/*
	 * A receive that is still in its TRIGGER phase when the timeout
	 * fires is treated as "transmission interrupted" and silently
	 * abandoned (see the PLIP_PK_TRIGGER check below) rather than
	 * counted as an error.
	 */
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Give up: free both directions' buffers and reset the link. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
0473
0474 static int
0475 plip_none(struct net_device *dev, struct net_local *nl,
0476 struct plip_local *snd, struct plip_local *rcv)
0477 {
0478 return OK;
0479 }
0480
0481
0482
/* PLIP_RECEIVE --- receive one byte as two 4-bit nibbles.
 *
 * Resumable: *ns_p records which nibble we are waiting for, so a
 * TIMEOUT return lets the caller retry from the same point later.
 * The byte is assembled into *data_p.  Returns OK or TIMEOUT.
 */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {	/* strobe low: low nibble present */
				c1 = read_status(dev);	/* re-read to debounce */
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;	/* status bits 3..6 carry the nibble */
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {	/* strobe high: high nibble present */
				c1 = read_status(dev);	/* re-read to debounce */
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;	/* merge high nibble */
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		break;
	case PLIP_NB_2:
		/* PLIP_NB_2 is only used on the transmit side. */
		break;
	}
	return OK;
}
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet-like, so we re-derive the protocol from the
 *	frame itself rather than trusting the peer.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	A real ethertype is always >= ETH_P_802_3_MIN; anything
	 *	smaller is an 802.3 length field.
	 */

	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell
	 *	breaks by assuming 0xFFFF at the start of the payload
	 *	means "raw 802.3" (no 802.2 LLC header).
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
0585
0586
/* PLIP_RECEIVE_PACKET --- receive a packet.
 *
 * State machine driven by rcv->state; each invocation resumes where the
 * previous one stopped.  Returns OK, TIMEOUT or ERROR; non-OK results
 * are routed through plip_bh_timeout_error() by plip_bh().
 */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01);		/* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		fallthrough;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* A send is also pending: use the (shorter)
			   trigger timeout so a collision falls back to
			   sending instead of stalling the link. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload for the checksum comparison below. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Inform the upper layer of the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A send was queued during the receive: switch
			   straight over to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
0706
0707
0708
/* PLIP_SEND --- send one byte as two 4-bit nibbles.
 *
 * Resumable: *ns_p records which handshake step is pending, so a
 * TIMEOUT return lets the caller retry from the same point later.
 * Returns OK or TIMEOUT.
 */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);	/* low nibble on the data lines */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));	/* raise strobe (bit 4) */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)	/* peer acknowledged low nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));	/* high nibble, strobe up */
		*ns_p = PLIP_NB_2;
		fallthrough;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));	/* drop strobe */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)		/* peer acknowledged high nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
0753
0754
/* PLIP_SEND_PACKET --- send a packet.
 *
 * State machine driven by snd->state.  The TRIGGER phase raises the
 * peer's attention line and watches for either its acknowledgement or
 * an incoming receive (collision), in which case the receive wins.
 * Returns OK, TIMEOUT, ERROR or HS_TIMEOUT.
 */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must be idle (status lines 0x80) before we start. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted: receive has priority. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {	/* peer acknowledged the trigger */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		break;

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Sum the payload for the trailing checksum byte. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
0874
/* Handler for PLIP_CN_CLOSING: return the link to idle, restart the tx
 * queue, and hand back the parport if plip_preempt() asked for it. */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
0891
0892
/* PLIP_ERROR --- wait till other end settled.
 * Once the status lines read idle (0x80) the interface is reset and the
 * queue restarted; otherwise we re-check later via the deferred work. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
0916
0917
/* Handle the parallel port interrupts.
 * Registered as the pardevice irq_func; in polled mode it is called
 * directly from plip_timer_bh(), hence the (dev->irq != -1) guard on
 * the spurious-interrupt message. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not a PLIP trigger pattern (0xc0) on the status lines. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		fallthrough;
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		/* Start a receive; plip_bh will run plip_receive_packet. */
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
0968
/* ndo_start_xmit: queue one skb for transmission.
 * Claims the parport on demand, stops the tx queue (one packet in
 * flight at a time) and kicks plip_bh to run the send state machine. */
static netdev_tx_t
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
1009
/* Rewrite the ethernet addresses in a freshly built header: source is
 * our own dev_addr; destination becomes 0xfc 0xfc followed by the
 * peer's IPv4 address (taken from this interface's first address —
 * point-to-point, so the address found is the one to use). */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1028
1029 static int
1030 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1031 unsigned short type, const void *daddr,
1032 const void *saddr, unsigned len)
1033 {
1034 int ret;
1035
1036 ret = eth_header(skb, dev, type, daddr, saddr, len);
1037 if (ret >= 0)
1038 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1039
1040 return ret;
1041 }
1042
1043 static int plip_hard_header_cache(const struct neighbour *neigh,
1044 struct hh_cache *hh, __be16 type)
1045 {
1046 int ret;
1047
1048 ret = eth_header_cache(neigh, hh, type);
1049 if (ret == 0) {
1050 struct ethhdr *eth;
1051
1052 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1053 HH_DATA_OFF(sizeof(*eth)));
1054 plip_rewrite_address (neigh->dev, eth);
1055 }
1056
1057 return ret;
1058 }
1059
1060
1061
1062
1063
1064
1065
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.  Claims
   the parport, clears the data bus, starts the poll timer in IRQ-less
   mode, and derives the MAC from the interface's IPv4 address. */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work properly anyway since the hh_cache copy took place
	   before the address was set.

	   PLIP doesn't have a real MAC address, but we need the device
	   to look ethernet-like, so we fabricate one: 0xfc 0xfc plus the
	   interface's own IPv4 address (bytes 0-1 were set to 0xfc in
	   plip_init_netdev()). */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list);
		if (ifa != NULL) {
			dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1124
1125
/* The inverse routine to plip_open (). Stops the queue and the poll
   timer, drops any in-flight skbs and releases the parport. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Ask plip_timer_bh to stop and wait until it confirms. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Free any pending transmit/receive buffers. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1171
1172 static int
1173 plip_preempt(void *handle)
1174 {
1175 struct net_device *dev = (struct net_device *)handle;
1176 struct net_local *nl = netdev_priv(dev);
1177
1178
1179 if (nl->connection != PLIP_CN_NONE) {
1180 nl->should_relinquish = 1;
1181 return 1;
1182 }
1183
1184 nl->port_owner = 0;
1185 return 0;
1186 }
1187
1188 static void
1189 plip_wakeup(void *handle)
1190 {
1191 struct net_device *dev = (struct net_device *)handle;
1192 struct net_local *nl = netdev_priv(dev);
1193
1194 if (nl->port_owner) {
1195
1196 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1197 if (!parport_claim(nl->pardev))
1198
1199 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1200 else
1201 return;
1202 }
1203
1204 if (!(dev->flags & IFF_UP))
1205
1206 return;
1207
1208 if (!parport_claim(nl->pardev)) {
1209 nl->port_owner = 1;
1210
1211 write_data (dev, 0x00);
1212 }
1213 }
1214
1215 static int
1216 plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1217 void __user *data, int cmd)
1218 {
1219 struct net_local *nl = netdev_priv(dev);
1220 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1221
1222 if (cmd != SIOCDEVPLIP)
1223 return -EOPNOTSUPP;
1224
1225 if (in_compat_syscall())
1226 return -EOPNOTSUPP;
1227
1228 switch(pc->pcmd) {
1229 case PLIP_GET_TIMEOUT:
1230 pc->trigger = nl->trigger;
1231 pc->nibble = nl->nibble;
1232 break;
1233 case PLIP_SET_TIMEOUT:
1234 if(!capable(CAP_NET_ADMIN))
1235 return -EPERM;
1236 nl->trigger = pc->trigger;
1237 nl->nibble = pc->nibble;
1238 break;
1239 default:
1240 return -EOPNOTSUPP;
1241 }
1242 return 0;
1243 }
1244
1245 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1246 static int timid;
1247
1248 module_param_array(parport, int, NULL, 0);
1249 module_param(timid, int, 0);
1250 MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1251
1252 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1253
1254 static inline int
1255 plip_searchfor(int list[], int a)
1256 {
1257 int i;
1258 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1259 if (list[i] == a) return 1;
1260 }
1261 return 0;
1262 }
1263
1264
1265
/* plip_attach() is called (by the parport code) when a port is
 * available to use.  Creates and registers one plipN net_device per
 * eligible port, up to PLIP_MAX devices. */
static void plip_attach (struct parport *port)
{
	static int unit;	/* next plipN unit number; persists across calls */
	struct net_device *dev;
	struct net_local *nl;
	char name[IFNAMSIZ];
	struct pardev_cb plip_cb;

	/* Eligible if: autoprobing (parport[0] == -1) and either we are
	   not timid or the port has no other devices yet; or the port
	   number was explicitly listed. */
	if ((parport[0] == -1 && (!timid || !port->devices)) ||
	    plip_searchfor(parport, port->number)) {
		if (unit == PLIP_MAX) {
			printk(KERN_ERR "plip: too many devices\n");
			return;
		}

		sprintf(name, "plip%d", unit);
		dev = alloc_etherdev(sizeof(struct net_local));
		if (!dev)
			return;

		strcpy(dev->name, name);

		dev->irq = port->irq;
		dev->base_addr = port->base;
		if (port->irq == -1) {
			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
		                 "which is fairly inefficient!\n", port->name);
		}

		nl = netdev_priv(dev);
		nl->dev = dev;

		/* Register with parport: sharing callbacks + irq handler. */
		memset(&plip_cb, 0, sizeof(plip_cb));
		plip_cb.private = dev;
		plip_cb.preempt = plip_preempt;
		plip_cb.wakeup = plip_wakeup;
		plip_cb.irq_func = plip_interrupt;

		nl->pardev = parport_register_dev_model(port, dev->name,
							&plip_cb, unit);

		if (!nl->pardev) {
			printk(KERN_ERR "%s: parport_register failed\n", name);
			goto err_free_dev;
		}

		plip_init_netdev(dev);

		if (register_netdev(dev)) {
			printk(KERN_ERR "%s: network register failed\n", name);
			goto err_parport_unregister;
		}

		printk(KERN_INFO "%s", version);
		if (dev->irq != -1)
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "using IRQ %d.\n",
				         dev->name, dev->base_addr, dev->irq);
		else
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "not using IRQ.\n",
					 dev->name, dev->base_addr);
		dev_plip[unit++] = dev;
	}
	return;

err_parport_unregister:
	parport_unregister_device(nl->pardev);
err_free_dev:
	free_netdev(dev);
}
1337
1338
1339
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use.  Nothing to do here: devices are torn
 * down in plip_cleanup_module(). */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1344
1345 static int plip_probe(struct pardevice *par_dev)
1346 {
1347 struct device_driver *drv = par_dev->dev.driver;
1348 int len = strlen(drv->name);
1349
1350 if (strncmp(par_dev->name, drv->name, len))
1351 return -ENODEV;
1352
1353 return 0;
1354 }
1355
/* parport driver registration record (device-model style). */
static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};
1363
/* Module exit: unregister every created device (releasing the parport
 * if still claimed), then drop the parport driver registration. */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}

	parport_unregister_driver(&plip_driver);
}
1383
1384 #ifndef MODULE
1385
static int parport_ptr;		/* next free slot in the parport[] list */

/* Parse the "plip=" kernel command-line option:
 *   plip=parport<N>  — add parport N to the list of ports to use
 *   plip=timid       — set the timid flag
 *   plip=0           — disable the driver (parport[0] = -2)
 * Anything else is reported and ignored. */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);
1417
1418 #endif
1419
/* Module init: honor the "disabled" sentinel, reconcile conflicting
 * options, and register with the parport subsystem (which will call
 * plip_attach for each matching port). */
static int __init plip_init (void)
{
	if (parport[0] == -2)
		return 0;	/* disabled via "plip=0" */

	if (parport[0] != -1 && timid) {
		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
		timid = 0;
	}

	if (parport_register_driver (&plip_driver)) {
		printk (KERN_WARNING "plip: couldn't register driver\n");
		return 1;
	}

	return 0;
}
1437
1438 module_init(plip_init);
1439 module_exit(plip_cleanup_module);
1440 MODULE_LICENSE("GPL");