0001 #define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
0002
0003 #define VERSION "3.0"
0004
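/*
 * AX.25 device driver for Z8530 (SCC/ESCC) based HDLC cards.
 * Written by Joerg Reuter DL1BKE <jreuter@yaina.de>; released under the
 * GNU General Public License (see the MODULE_* tags at the end of this file).
 */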
0138
0139 #undef SCC_LDELAY
0140 #undef SCC_DONT_CHECK
0141
0142 #define SCC_MAXCHIPS 4
0143 #define SCC_BUFSIZE 384
0144 #undef SCC_DEBUG
0145
0146 #define SCC_DEFAULT_CLOCK 4915200
0147
0148
0149
0150
0151 #include <linux/compat.h>
0152 #include <linux/module.h>
0153 #include <linux/errno.h>
0154 #include <linux/signal.h>
0155 #include <linux/timer.h>
0156 #include <linux/interrupt.h>
0157 #include <linux/ioport.h>
0158 #include <linux/string.h>
0159 #include <linux/in.h>
0160 #include <linux/fcntl.h>
0161 #include <linux/ptrace.h>
0162 #include <linux/delay.h>
0163 #include <linux/skbuff.h>
0164 #include <linux/netdevice.h>
0165 #include <linux/rtnetlink.h>
0166 #include <linux/if_ether.h>
0167 #include <linux/if_arp.h>
0168 #include <linux/socket.h>
0169 #include <linux/init.h>
0170 #include <linux/scc.h>
0171 #include <linux/ctype.h>
0172 #include <linux/kernel.h>
0173 #include <linux/proc_fs.h>
0174 #include <linux/seq_file.h>
0175 #include <linux/bitops.h>
0176
0177 #include <net/net_namespace.h>
0178 #include <net/ax25.h>
0179
0180 #include <asm/irq.h>
0181 #include <asm/io.h>
0182 #include <linux/uaccess.h>
0183
0184 #include "z8530.h"
0185
0186 static const char banner[] __initconst = KERN_INFO \
0187 "AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
0188
0189 static void t_dwait(struct timer_list *t);
0190 static void t_txdelay(struct timer_list *t);
0191 static void t_tail(struct timer_list *t);
0192 static void t_busy(struct timer_list *);
0193 static void t_maxkeyup(struct timer_list *);
0194 static void t_idle(struct timer_list *t);
0195 static void scc_tx_done(struct scc_channel *);
0196 static void scc_start_tx_timer(struct scc_channel *,
0197 void (*)(struct timer_list *), unsigned long);
0198 static void scc_start_maxkeyup(struct scc_channel *);
0199 static void scc_start_defer(struct scc_channel *);
0200
0201 static void z8530_init(void);
0202
0203 static void init_channel(struct scc_channel *scc);
0204 static void scc_key_trx (struct scc_channel *scc, char tx);
0205 static void scc_init_timer(struct scc_channel *scc);
0206
0207 static int scc_net_alloc(const char *name, struct scc_channel *scc);
0208 static void scc_net_setup(struct net_device *dev);
0209 static int scc_net_open(struct net_device *dev);
0210 static int scc_net_close(struct net_device *dev);
0211 static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
0212 static netdev_tx_t scc_net_tx(struct sk_buff *skb,
0213 struct net_device *dev);
0214 static int scc_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
0215 void __user *data, int cmd);
0216 static int scc_net_set_mac_address(struct net_device *dev, void *addr);
0217 static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
0218
0219 static unsigned char SCC_DriverName[] = "scc";
0220
0221 static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
0222
0223 static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS];
0224
0225 static struct scc_ctrl {
0226 io_port chan_A;
0227 io_port chan_B;
0228 int irq;
0229 } SCC_ctrl[SCC_MAXCHIPS+1];
0230
0231 static unsigned char Driver_Initialized;
0232 static int Nchips;
0233 static io_port Vector_Latch;
0241
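/*
 * Low-level Z8530 register access.  A register is addressed by first
 * writing its number to the control port and then reading or writing the
 * value; the two steps must not be interleaved with other accesses, hence
 * the iolock.  SCC_LDELAY, if defined, inserts a settle delay in between.
 */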
0242 static DEFINE_SPINLOCK(iolock);
0243
0244 static inline unsigned char InReg(io_port port, unsigned char reg)
0245 {
0246 unsigned long flags;
0247 unsigned char r;
0248
0249 spin_lock_irqsave(&iolock, flags);
0250 #ifdef SCC_LDELAY
0251 Outb(port, reg);
0252 udelay(SCC_LDELAY);
0253 r=Inb(port);
0254 udelay(SCC_LDELAY);
0255 #else
0256 Outb(port, reg);
0257 r=Inb(port);
0258 #endif
0259 spin_unlock_irqrestore(&iolock, flags);
0260 return r;
0261 }
0262
0263 static inline void OutReg(io_port port, unsigned char reg, unsigned char val)
0264 {
0265 unsigned long flags;
0266
0267 spin_lock_irqsave(&iolock, flags);
0268 #ifdef SCC_LDELAY
0269 Outb(port, reg); udelay(SCC_LDELAY);
0270 Outb(port, val); udelay(SCC_LDELAY);
0271 #else
0272 Outb(port, reg);
0273 Outb(port, val);
0274 #endif
0275 spin_unlock_irqrestore(&iolock, flags);
0276 }
0277
0278 static inline void wr(struct scc_channel *scc, unsigned char reg,
0279 unsigned char val)
0280 {
0281 OutReg(scc->ctrl, reg, (scc->wreg[reg] = val));
0282 }
0283
0284 static inline void or(struct scc_channel *scc, unsigned char reg, unsigned char val)
0285 {
0286 OutReg(scc->ctrl, reg, (scc->wreg[reg] |= val));
0287 }
0288
0289 static inline void cl(struct scc_channel *scc, unsigned char reg, unsigned char val)
0290 {
0291 OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
0292 }
0293
0294
0295
0296
0297
0298 static inline void scc_discard_buffers(struct scc_channel *scc)
0299 {
0300 unsigned long flags;
0301
0302 spin_lock_irqsave(&scc->lock, flags);
0303 if (scc->tx_buff != NULL)
0304 {
0305 dev_kfree_skb(scc->tx_buff);
0306 scc->tx_buff = NULL;
0307 }
0308
0309 while (!skb_queue_empty(&scc->tx_queue))
0310 dev_kfree_skb(skb_dequeue(&scc->tx_queue));
0311
0312 spin_unlock_irqrestore(&scc->lock, flags);
0313 }
0323
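/*
 * In KISS "fulldup optima" mode (KISS_DUPLEX_OPTIMA) hardware events --
 * DCD changes and "all frames sent" -- are reported to the user as short
 * PARAM_HWEVENT frames injected into the receive path.
 */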
0324 static inline void scc_notify(struct scc_channel *scc, int event)
0325 {
0326 struct sk_buff *skb;
0327 char *bp;
0328
0329 if (scc->kiss.fulldup != KISS_DUPLEX_OPTIMA)
0330 return;
0331
0332 skb = dev_alloc_skb(2);
0333 if (skb != NULL)
0334 {
0335 bp = skb_put(skb, 2);
0336 *bp++ = PARAM_HWEVENT;
0337 *bp++ = event;
0338 scc_net_rx(scc, skb);
0339 } else
0340 scc->stat.nospace++;
0341 }
0342
0343 static inline void flush_rx_FIFO(struct scc_channel *scc)
0344 {
0345 int k;
0346
0347 for (k=0; k<3; k++)
0348 Inb(scc->data);
0349
0350 if(scc->rx_buff != NULL)
0351 {
0352 scc->stat.rxerrs++;
0353 dev_kfree_skb_irq(scc->rx_buff);
0354 scc->rx_buff = NULL;
0355 }
0356 }
0357
0358 static void start_hunt(struct scc_channel *scc)
0359 {
0360 if (scc->modem.clocksrc != CLK_EXTERNAL)
0361 OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]);
0362 or(scc,R3,ENT_HM|RxENABLE);
0363 }
0364
0365
0366
0367
0368
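/* Transmitter interrupt: send the next byte of the current frame, or fetch
 * the next frame from the queue.  An empty queue or an empty skb ends the
 * transmission via scc_tx_done().
 */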
0369 static inline void scc_txint(struct scc_channel *scc)
0370 {
0371 struct sk_buff *skb;
0372
0373 scc->stat.txints++;
0374 skb = scc->tx_buff;
0375
0376
0377
0378 if (skb == NULL)
0379 {
0380 skb = skb_dequeue(&scc->tx_queue);
0381 scc->tx_buff = skb;
0382 netif_wake_queue(scc->dev);
0383
0384 if (skb == NULL)
0385 {
0386 scc_tx_done(scc);
0387 Outb(scc->ctrl, RES_Tx_P);
0388 return;
0389 }
0390
0391 if (skb->len == 0)
0392 {
0393 dev_kfree_skb_irq(skb);
0394 scc->tx_buff = NULL;
0395 scc_tx_done(scc);
0396 Outb(scc->ctrl, RES_Tx_P);
0397 return;
0398 }
0399
0400 scc->stat.tx_state = TXS_ACTIVE;
0401
0402 OutReg(scc->ctrl, R0, RES_Tx_CRC);
0403
0404 or(scc,R10,ABUNDER);
0405 Outb(scc->data,*skb->data);
0406 skb_pull(skb, 1);
0407
0408 if (!scc->enhanced)
0409 Outb(scc->ctrl,RES_EOM_L);
0410 return;
0411 }
0412
0413
0414
0415 if (skb->len == 0)
0416 {
0417 Outb(scc->ctrl, RES_Tx_P);
0418 cl(scc, R10, ABUNDER);
0419 dev_kfree_skb_irq(skb);
0420 scc->tx_buff = NULL;
0421 scc->stat.tx_state = TXS_NEWFRAME;
0422 return;
0423 }
0424
0425
0426
0427 Outb(scc->data,*skb->data);
0428 skb_pull(skb, 1);
0429 }
0430
0431
0432
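/* External/status interrupt: handles break/abort, DCD or sync/hunt
 * (software DCD) changes, and transmit underruns.
 */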
0433 static inline void scc_exint(struct scc_channel *scc)
0434 {
0435 unsigned char status,changes,chg_and_stat;
0436
0437 scc->stat.exints++;
0438
0439 status = InReg(scc->ctrl,R0);
0440 changes = status ^ scc->status;
0441 chg_and_stat = changes & status;
0442
0443
0444
0445 if (chg_and_stat & BRK_ABRT)
0446 flush_rx_FIFO(scc);
0447
0448
0449
0450 if ((changes & SYNC_HUNT) && scc->kiss.softdcd)
0451 {
0452 if (status & SYNC_HUNT)
0453 {
0454 scc->dcd = 0;
0455 flush_rx_FIFO(scc);
0456 if (scc->modem.clocksrc != CLK_EXTERNAL)
0457 OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]);
0458 } else {
0459 scc->dcd = 1;
0460 }
0461
0462 scc_notify(scc, scc->dcd? HWEV_DCD_OFF:HWEV_DCD_ON);
0463 }
0464
0465
0466
0467
0468 if((changes & DCD) && !scc->kiss.softdcd)
0469 {
0470 if(status & DCD)
0471 {
0472 start_hunt(scc);
0473 scc->dcd = 1;
0474 } else {
0475 cl(scc,R3,ENT_HM|RxENABLE);
0476 flush_rx_FIFO(scc);
0477 scc->dcd = 0;
0478 }
0479
0480 scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
0481 }
0482
0483 #ifdef notdef
0484
0485
0486
0487
0488
0489
0490 if (chg_and_stat & CTS)
0491 {
0492 if (scc->kiss.txdelay == 0)
0493 scc_start_tx_timer(scc, t_txdelay, 0);
0494 }
0495 #endif
0496
0497 if (scc->stat.tx_state == TXS_ACTIVE && (status & TxEOM))
0498 {
0499 scc->stat.tx_under++;
0500 Outb(scc->ctrl, RES_EXT_INT);
0501
0502 if (scc->tx_buff != NULL)
0503 {
0504 dev_kfree_skb_irq(scc->tx_buff);
0505 scc->tx_buff = NULL;
0506 }
0507
0508 or(scc,R10,ABUNDER);
0509 scc_start_tx_timer(scc, t_txdelay, 0);
0510 }
0511
0512 scc->status = status;
0513 Outb(scc->ctrl,RES_EXT_INT);
0514 }
0515
0516
0517
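/* Receiver interrupt: store one byte per interrupt in the receive buffer.
 * Bytes are discarded while we transmit in half duplex mode, when no buffer
 * can be allocated, or when the frame exceeds the configured buffer size.
 */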
0518 static inline void scc_rxint(struct scc_channel *scc)
0519 {
0520 struct sk_buff *skb;
0521
0522 scc->stat.rxints++;
0523
0524 if((scc->wreg[5] & RTS) && scc->kiss.fulldup == KISS_DUPLEX_HALF)
0525 {
0526 Inb(scc->data);
0527 or(scc,R3,ENT_HM);
0528 return;
0529 }
0530
0531 skb = scc->rx_buff;
0532
0533 if (skb == NULL)
0534 {
0535 skb = dev_alloc_skb(scc->stat.bufsize);
0536 if (skb == NULL)
0537 {
0538 scc->dev_stat.rx_dropped++;
0539 scc->stat.nospace++;
0540 Inb(scc->data);
0541 or(scc, R3, ENT_HM);
0542 return;
0543 }
0544
0545 scc->rx_buff = skb;
0546 skb_put_u8(skb, 0);
0547 }
0548
0549 if (skb->len >= scc->stat.bufsize)
0550 {
0551 #ifdef notdef
0552 printk(KERN_DEBUG "z8530drv: oops, scc_rxint() received huge frame...\n");
0553 #endif
0554 dev_kfree_skb_irq(skb);
0555 scc->rx_buff = NULL;
0556 Inb(scc->data);
0557 or(scc, R3, ENT_HM);
0558 return;
0559 }
0560
0561 skb_put_u8(skb, Inb(scc->data));
0562 }
0563
0564
0565
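/* Special receive condition: receiver overrun or end of frame.  A frame
 * with a good CRC and residue code is trimmed by one byte and handed to
 * scc_net_rx(); everything else is dropped and counted as an error.
 */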
0566 static inline void scc_spint(struct scc_channel *scc)
0567 {
0568 unsigned char status;
0569 struct sk_buff *skb;
0570
0571 scc->stat.spints++;
0572
0573 status = InReg(scc->ctrl,R1);
0574
0575 Inb(scc->data);
0576 skb = scc->rx_buff;
0577
0578 if(status & Rx_OVR)
0579 {
0580 scc->stat.rx_over++;
0581 or(scc,R3,ENT_HM);
0582
0583 if (skb != NULL)
0584 dev_kfree_skb_irq(skb);
0585 scc->rx_buff = skb = NULL;
0586 }
0587
0588 if(status & END_FR && skb != NULL)
0589 {
0590
0591
0592 if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
0593 {
0594
0595 skb_trim(skb, skb->len-1);
0596 scc_net_rx(scc, skb);
0597 scc->rx_buff = NULL;
0598 scc->stat.rxframes++;
0599 } else {
0600 dev_kfree_skb_irq(skb);
0601 scc->rx_buff = NULL;
0602 scc->stat.rxerrs++;
0603 }
0604 }
0605
0606 Outb(scc->ctrl,ERR_RES);
0607 }
0608
0609
0610
0611
0612 static void scc_isr_dispatch(struct scc_channel *scc, int vector)
0613 {
0614 spin_lock(&scc->lock);
0615 switch (vector & VECTOR_MASK)
0616 {
0617 case TXINT: scc_txint(scc); break;
0618 case EXINT: scc_exint(scc); break;
0619 case RXINT: scc_rxint(scc); break;
0620 case SPINT: scc_spint(scc); break;
0621 }
0622 spin_unlock(&scc->lock);
0623 }
0624
0625
0626
0627
0628
0629
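/*
 * Main interrupt service routine.  With a vector latch the pending vectors
 * are read from the latch; otherwise every chip sharing the IRQ is polled
 * through channel B's R2 register.  The vector selects the channel and the
 * interrupt type, which scc_isr_dispatch() acts on.  SCC_IRQTIMEOUT bounds
 * the loops in case of a stuck interrupt source.
 */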
0630 #define SCC_IRQTIMEOUT 30000
0631
0632 static irqreturn_t scc_isr(int irq, void *dev_id)
0633 {
0634 int chip_irq = (long) dev_id;
0635 unsigned char vector;
0636 struct scc_channel *scc;
0637 struct scc_ctrl *ctrl;
0638 int k;
0639
0640 if (Vector_Latch)
0641 {
0642 for(k=0; k < SCC_IRQTIMEOUT; k++)
0643 {
0644 Outb(Vector_Latch, 0);
0645
0646
0647 if((vector=Inb(Vector_Latch)) >= 16 * Nchips) break;
0648 if (vector & 0x01) break;
0649
0650 scc=&SCC_Info[vector >> 3 ^ 0x01];
0651 if (!scc->dev) break;
0652
0653 scc_isr_dispatch(scc, vector);
0654
0655 OutReg(scc->ctrl,R0,RES_H_IUS);
0656 }
0657
0658 if (k == SCC_IRQTIMEOUT)
0659 printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?\n");
0660
0661 return IRQ_HANDLED;
0662 }
0663
0664
0665
0666
0667
0668 ctrl = SCC_ctrl;
0669 while (ctrl->chan_A)
0670 {
0671 if (ctrl->irq != chip_irq)
0672 {
0673 ctrl++;
0674 continue;
0675 }
0676
0677 scc = NULL;
0678 for (k = 0; InReg(ctrl->chan_A,R3) && k < SCC_IRQTIMEOUT; k++)
0679 {
0680 vector=InReg(ctrl->chan_B,R2);
0681 if (vector & 0x01) break;
0682
0683 scc = &SCC_Info[vector >> 3 ^ 0x01];
0684 if (!scc->dev) break;
0685
0686 scc_isr_dispatch(scc, vector);
0687 }
0688
0689 if (k == SCC_IRQTIMEOUT)
0690 {
0691 printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?!\n");
0692 break;
0693 }
0694
0695
0696
0697
0698
0699
0700
0701
0702 if (scc != NULL)
0703 {
0704 OutReg(scc->ctrl,R0,RES_H_IUS);
0705 ctrl = SCC_ctrl;
0706 } else
0707 ctrl++;
0708 }
0709 return IRQ_HANDLED;
0710 }
0720
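/*
 * Baud rate generator.  set_speed() programs the time constant
 * tc = clock / (speed * 64) - 2 into R12/R13; for example, the default
 * 4.9152 MHz clock and 1200 baud give 4915200/76800 - 2 = 62.  While
 * transmitting, scc_key_trx() uses a divisor of 2 instead, since the
 * transmit clock is then taken directly from the BRG.
 */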
0721 static inline void set_brg(struct scc_channel *scc, unsigned int tc)
0722 {
0723 cl(scc,R14,BRENABL);
0724 wr(scc,R12,tc & 255);
0725 wr(scc,R13,tc >> 8);
0726 or(scc,R14,BRENABL);
0727 }
0728
0729 static inline void set_speed(struct scc_channel *scc)
0730 {
0731 unsigned long flags;
0732 spin_lock_irqsave(&scc->lock, flags);
0733
0734 if (scc->modem.speed > 0)
0735 set_brg(scc, (unsigned) (scc->clock / (scc->modem.speed * 64)) - 2);
0736
0737 spin_unlock_irqrestore(&scc->lock, flags);
0738 }
0739
0740
0741
0742
0743 static inline void init_brg(struct scc_channel *scc)
0744 {
0745 wr(scc, R14, BRSRC);
0746 OutReg(scc->ctrl, R14, SSBR|scc->wreg[R14]);
0747 OutReg(scc->ctrl, R14, SNRZI|scc->wreg[R14]);
0748 }
0794
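/* Set the channel up for SDLC operation: 8 bit characters, CRC, flag sync,
 * NRZ/NRZI and clock source according to the modem settings, the ESCC
 * extras if available, and the receive/transmit/status interrupts we need.
 */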
0795 static void init_channel(struct scc_channel *scc)
0796 {
0797 del_timer(&scc->tx_t);
0798 del_timer(&scc->tx_wdog);
0799
0800 disable_irq(scc->irq);
0801
0802 wr(scc,R4,X1CLK|SDLC);
0803 wr(scc,R1,0);
0804 wr(scc,R3,Rx8|RxCRC_ENAB);
0805 wr(scc,R5,Tx8|DTR|TxCRC_ENAB);
0806 wr(scc,R6,0);
0807 wr(scc,R7,FLAG);
0808 wr(scc,R9,VIS);
0809 wr(scc,R10,(scc->modem.nrz? NRZ : NRZI)|CRCPS|ABUNDER);
0810 wr(scc,R14, 0);
0838
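/* Clock source: CLK_DPLL takes both clocks from the DPLL (fed by the BRG),
 * CLK_DIVIDER recovers the receive clock with the DPLL, expects the
 * transmit clock on RTxC and outputs a divided clock on TRxC, and
 * CLK_EXTERNAL uses the modem's clocks on the RTxC/TRxC pins with the DPLL
 * disabled.  The exact pin assignment depends on the board brand.
 */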
0839 switch(scc->modem.clocksrc)
0840 {
0841 case CLK_DPLL:
0842 wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
0843 init_brg(scc);
0844 break;
0845
0846 case CLK_DIVIDER:
0847 wr(scc, R11, ((scc->brand & BAYCOM)? TRxCDP : TRxCBR) | RCDPLL|TCRTxCP|TRxCOI);
0848 init_brg(scc);
0849 break;
0850
0851 case CLK_EXTERNAL:
0852 wr(scc, R11, (scc->brand & BAYCOM)? RCTRxCP|TCRTxCP : RCRTxCP|TCTRxCP);
0853 OutReg(scc->ctrl, R14, DISDPLL);
0854 break;
0855
0856 }
0857
0858 set_speed(scc);
0859
0860 if(scc->enhanced)
0861 {
0862 or(scc,R15,SHDLCE|FIFOE);
0863 wr(scc,R7,AUTOEOM);
0864 }
0865
0866 if(scc->kiss.softdcd || (InReg(scc->ctrl,R0) & DCD))
0867
0868 {
0869 start_hunt(scc);
0870 }
0871
0872
0873
0874 wr(scc,R15, BRKIE|TxUIE|(scc->kiss.softdcd? SYNCIE:DCDIE));
0875
0876 Outb(scc->ctrl,RES_EXT_INT);
0877 Outb(scc->ctrl,RES_EXT_INT);
0878
0879 or(scc,R1,INT_ALL_Rx|TxINT_ENAB|EXT_INT_ENAB);
0880
0881 scc->status = InReg(scc->ctrl,R0);
0882
0883 or(scc,R9,MIE);
0884
0885 scc_init_timer(scc);
0886
0887 enable_irq(scc->irq);
0888 }
0900
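/* Key or unkey the transmitter.  RTS and the transmitter enable are
 * switched via R5; on PRIMUS boards bit 7 of the value written to ctrl+4
 * follows the PTT state.  With DPLL clocking the BRG is reprogrammed for
 * the transmit or receive rate, and with CONFIG_SCC_TRXECHO the receiver
 * is muted while we transmit.
 */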
0901 static void scc_key_trx(struct scc_channel *scc, char tx)
0902 {
0903 unsigned int time_const;
0904
0905 if (scc->brand & PRIMUS)
0906 Outb(scc->ctrl + 4, scc->option | (tx? 0x80 : 0));
0907
0908 if (scc->modem.speed < 300)
0909 scc->modem.speed = 1200;
0910
0911 time_const = (unsigned) (scc->clock / (scc->modem.speed * (tx? 2:64))) - 2;
0912
0913 disable_irq(scc->irq);
0914
0915 if (tx)
0916 {
0917 or(scc, R1, TxINT_ENAB);
0918 or(scc, R15, TxUIE);
0919 }
0920
0921 if (scc->modem.clocksrc == CLK_DPLL)
0922 {
0923 if (tx)
0924 {
0925 #ifdef CONFIG_SCC_TRXECHO
0926 cl(scc, R3, RxENABLE|ENT_HM);
0927 cl(scc, R15, DCDIE|SYNCIE);
0928 #endif
0929 set_brg(scc, time_const);
0930
0931
0932 wr(scc, R11, RCDPLL|TCBR|TRxCOI|TRxCBR);
0933
0934
0935 if (scc->kiss.tx_inhibit)
0936 {
0937 or(scc,R5, TxENAB);
0938 scc->wreg[R5] |= RTS;
0939 } else {
0940 or(scc,R5,RTS|TxENAB);
0941 }
0942 } else {
0943 cl(scc,R5,RTS|TxENAB);
0944
0945 set_brg(scc, time_const);
0946
0947
0948 wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
0949
0950 #ifndef CONFIG_SCC_TRXECHO
0951 if (scc->kiss.softdcd)
0952 #endif
0953 {
0954 or(scc,R15, scc->kiss.softdcd? SYNCIE:DCDIE);
0955 start_hunt(scc);
0956 }
0957 }
0958 } else {
0959 if (tx)
0960 {
0961 #ifdef CONFIG_SCC_TRXECHO
0962 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
0963 {
0964 cl(scc, R3, RxENABLE);
0965 cl(scc, R15, DCDIE|SYNCIE);
0966 }
0967 #endif
0968
0969 if (scc->kiss.tx_inhibit)
0970 {
0971 or(scc,R5, TxENAB);
0972 scc->wreg[R5] |= RTS;
0973 } else {
0974 or(scc,R5,RTS|TxENAB);
0975 }
0976 } else {
0977 cl(scc,R5,RTS|TxENAB);
0978
0979 if ((scc->kiss.fulldup == KISS_DUPLEX_HALF) &&
0980 #ifndef CONFIG_SCC_TRXECHO
0981 scc->kiss.softdcd)
0982 #else
0983 1)
0984 #endif
0985 {
0986 or(scc, R15, scc->kiss.softdcd? SYNCIE:DCDIE);
0987 start_hunt(scc);
0988 }
0989 }
0990 }
0991
0992 enable_irq(scc->irq);
0993 }
0994
0995
0996
0997
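/* Timer helpers: __scc_start_tx_timer()/scc_start_tx_timer() (re)arm
 * scc->tx_t with 'when' given in 10 ms units; 0 runs the handler
 * immediately, TIMER_OFF leaves the timer disabled.  scc_start_defer() and
 * scc_start_maxkeyup() arm the tx_wdog watchdog with the maxdefer and
 * maxkeyup times, which are given in seconds.
 */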
0998 static void __scc_start_tx_timer(struct scc_channel *scc,
0999 void (*handler)(struct timer_list *t),
1000 unsigned long when)
1001 {
1002 del_timer(&scc->tx_t);
1003
1004 if (when == 0)
1005 {
1006 handler(&scc->tx_t);
1007 } else
1008 if (when != TIMER_OFF)
1009 {
1010 scc->tx_t.function = handler;
1011 scc->tx_t.expires = jiffies + (when*HZ)/100;
1012 add_timer(&scc->tx_t);
1013 }
1014 }
1015
1016 static void scc_start_tx_timer(struct scc_channel *scc,
1017 void (*handler)(struct timer_list *t),
1018 unsigned long when)
1019 {
1020 unsigned long flags;
1021
1022 spin_lock_irqsave(&scc->lock, flags);
1023 __scc_start_tx_timer(scc, handler, when);
1024 spin_unlock_irqrestore(&scc->lock, flags);
1025 }
1026
1027 static void scc_start_defer(struct scc_channel *scc)
1028 {
1029 unsigned long flags;
1030
1031 spin_lock_irqsave(&scc->lock, flags);
1032 del_timer(&scc->tx_wdog);
1033
1034 if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
1035 {
1036 scc->tx_wdog.function = t_busy;
1037 scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
1038 add_timer(&scc->tx_wdog);
1039 }
1040 spin_unlock_irqrestore(&scc->lock, flags);
1041 }
1042
1043 static void scc_start_maxkeyup(struct scc_channel *scc)
1044 {
1045 unsigned long flags;
1046
1047 spin_lock_irqsave(&scc->lock, flags);
1048 del_timer(&scc->tx_wdog);
1049
1050 if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
1051 {
1052 scc->tx_wdog.function = t_maxkeyup;
1053 scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
1054 add_timer(&scc->tx_wdog);
1055 }
1056 spin_unlock_irqrestore(&scc->lock, flags);
1057 }
1058
1059
1060
1061
1062
1063
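/* The transmit queue has drained: depending on the duplex mode either start
 * the idle timer (full duplex link), notify the user (optima mode), or
 * start the tail timer to unkey the transmitter.
 */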
1064 static void scc_tx_done(struct scc_channel *scc)
1065 {
1066
1067
1068
1069
1070 switch (scc->kiss.fulldup)
1071 {
1072 case KISS_DUPLEX_LINK:
1073 scc->stat.tx_state = TXS_IDLE2;
1074 if (scc->kiss.idletime != TIMER_OFF)
1075 scc_start_tx_timer(scc, t_idle,
1076 scc->kiss.idletime*100);
1077 break;
1078 case KISS_DUPLEX_OPTIMA:
1079 scc_notify(scc, HWEV_ALL_SENT);
1080 break;
1081 default:
1082 scc->stat.tx_state = TXS_BUSY;
1083 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1084 }
1085
1086 netif_wake_queue(scc->dev);
1087 }
1088
1089
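/* 'Rand' is a simple pseudo random value for the p-persistence algorithm.
 * is_grouped(): a channel must not transmit while another channel of the
 * same group is keyed (TXGROUP) or shows DCD (RXGROUP).
 */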
1090 static unsigned char Rand = 17;
1091
1092 static inline int is_grouped(struct scc_channel *scc)
1093 {
1094 int k;
1095 struct scc_channel *scc2;
1096 unsigned char grp1, grp2;
1097
1098 grp1 = scc->kiss.group;
1099
1100 for (k = 0; k < (Nchips * 2); k++)
1101 {
1102 scc2 = &SCC_Info[k];
1103 grp2 = scc2->kiss.group;
1104
1105 if (scc2 == scc || !(scc2->dev && grp2))
1106 continue;
1107
1108 if ((grp1 & 0x3f) == (grp2 & 0x3f))
1109 {
1110 if ( (grp1 & TXGROUP) && (scc2->wreg[R5] & RTS) )
1111 return 1;
1112
1113 if ( (grp1 & RXGROUP) && scc2->dcd )
1114 return 1;
1115 }
1116 }
1117 return 0;
1118 }
1127
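/* Wait/slot timer: decide whether we may key the transmitter.  On half
 * duplex channels p-persistence applies: defer and re-arm the slot timer
 * while DCD is set, the persistence test fails, or a grouped channel is
 * busy.  Otherwise key up and start the TX delay timer.
 */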
1128 static void t_dwait(struct timer_list *t)
1129 {
1130 struct scc_channel *scc = from_timer(scc, t, tx_t);
1131
1132 if (scc->stat.tx_state == TXS_WAIT)
1133 {
1134 if (skb_queue_empty(&scc->tx_queue)) {
1135 scc->stat.tx_state = TXS_IDLE;
1136 netif_wake_queue(scc->dev);
1137 return;
1138 }
1139
1140 scc->stat.tx_state = TXS_BUSY;
1141 }
1142
1143 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
1144 {
1145 Rand = Rand * 17 + 31;
1146
1147 if (scc->dcd || (scc->kiss.persist) < Rand || (scc->kiss.group && is_grouped(scc)) )
1148 {
1149 scc_start_defer(scc);
1150 scc_start_tx_timer(scc, t_dwait, scc->kiss.slottime);
1151 return ;
1152 }
1153 }
1154
1155 if ( !(scc->wreg[R5] & RTS) )
1156 {
1157 scc_key_trx(scc, TX_ON);
1158 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1159 } else {
1160 scc_start_tx_timer(scc, t_txdelay, 0);
1161 }
1162 }
1163
1164
1165
1166
1167
1168
1169
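/* TX delay expired: arm the maxkeyup watchdog and push the first byte into
 * the transmitter by calling scc_txint() by hand.
 */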
1170 static void t_txdelay(struct timer_list *t)
1171 {
1172 struct scc_channel *scc = from_timer(scc, t, tx_t);
1173
1174 scc_start_maxkeyup(scc);
1175
1176 if (scc->tx_buff == NULL)
1177 {
1178 disable_irq(scc->irq);
1179 scc_txint(scc);
1180 enable_irq(scc->irq);
1181 }
1182 }
1190
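/* Tail time expired: unkey the transmitter and either go idle or, after a
 * timeout, wait 'mintime' before trying again.
 */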
1191 static void t_tail(struct timer_list *t)
1192 {
1193 struct scc_channel *scc = from_timer(scc, t, tx_t);
1194 unsigned long flags;
1195
1196 spin_lock_irqsave(&scc->lock, flags);
1197 del_timer(&scc->tx_wdog);
1198 scc_key_trx(scc, TX_OFF);
1199 spin_unlock_irqrestore(&scc->lock, flags);
1200
1201 if (scc->stat.tx_state == TXS_TIMEOUT)
1202 {
1203 scc->stat.tx_state = TXS_WAIT;
1204 scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
1205 return;
1206 }
1207
1208 scc->stat.tx_state = TXS_IDLE;
1209 netif_wake_queue(scc->dev);
1210 }
1211
1212
1213
1214
1215
1216
1217
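/* The channel was busy longer than maxdefer: drop all queued frames and
 * give up on this transmission attempt.
 */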
1218 static void t_busy(struct timer_list *t)
1219 {
1220 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1221
1222 del_timer(&scc->tx_t);
1223 netif_stop_queue(scc->dev);
1224
1225 scc_discard_buffers(scc);
1226 scc->stat.txerrs++;
1227 scc->stat.tx_state = TXS_IDLE;
1228
1229 netif_wake_queue(scc->dev);
1230 }
1231
1232
1233
1234
1235
1236
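/* The transmitter was keyed longer than maxkeyup: abort the transmission,
 * drop the queue and unkey via the tail timer.
 */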
1237 static void t_maxkeyup(struct timer_list *t)
1238 {
1239 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1240 unsigned long flags;
1241
1242 spin_lock_irqsave(&scc->lock, flags);
1243
1244
1245
1246
1247
1248 netif_stop_queue(scc->dev);
1249 scc_discard_buffers(scc);
1250
1251 del_timer(&scc->tx_t);
1252
1253 cl(scc, R1, TxINT_ENAB);
1254 cl(scc, R15, TxUIE);
1255 OutReg(scc->ctrl, R0, RES_Tx_P);
1256
1257 spin_unlock_irqrestore(&scc->lock, flags);
1258
1259 scc->stat.txerrs++;
1260 scc->stat.tx_state = TXS_TIMEOUT;
1261 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1262 }
1270
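/* Idle time (full duplex link mode) expired: unkey the transmitter and wait
 * at least 'mintime' before keying it again.
 */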
1271 static void t_idle(struct timer_list *t)
1272 {
1273 struct scc_channel *scc = from_timer(scc, t, tx_t);
1274
1275 del_timer(&scc->tx_wdog);
1276
1277 scc_key_trx(scc, TX_OFF);
1278 if(scc->kiss.mintime)
1279 scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
1280 scc->stat.tx_state = TXS_WAIT;
1281 }
1282
1283 static void scc_init_timer(struct scc_channel *scc)
1284 {
1285 unsigned long flags;
1286
1287 spin_lock_irqsave(&scc->lock, flags);
1288 scc->stat.tx_state = TXS_IDLE;
1289 spin_unlock_irqrestore(&scc->lock, flags);
1290 }
1301
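/*
 * Get/set a single KISS parameter.  These are used both by the
 * SIOCSCCGKISS/SIOCSCCSKISS ioctls and by in-band KISS command frames
 * arriving through scc_net_tx().
 */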
1302 #define CAST(x) (unsigned long)(x)
1303
1304 static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
1305 {
1306 switch (cmd)
1307 {
1308 case PARAM_TXDELAY: scc->kiss.txdelay=arg; break;
1309 case PARAM_PERSIST: scc->kiss.persist=arg; break;
1310 case PARAM_SLOTTIME: scc->kiss.slottime=arg; break;
1311 case PARAM_TXTAIL: scc->kiss.tailtime=arg; break;
1312 case PARAM_FULLDUP: scc->kiss.fulldup=arg; break;
1313 case PARAM_DTR: break;
1314 case PARAM_GROUP: scc->kiss.group=arg; break;
1315 case PARAM_IDLE: scc->kiss.idletime=arg; break;
1316 case PARAM_MIN: scc->kiss.mintime=arg; break;
1317 case PARAM_MAXKEY: scc->kiss.maxkeyup=arg; break;
1318 case PARAM_WAIT: scc->kiss.waittime=arg; break;
1319 case PARAM_MAXDEFER: scc->kiss.maxdefer=arg; break;
1320 case PARAM_TX: scc->kiss.tx_inhibit=arg; break;
1321
1322 case PARAM_SOFTDCD:
1323 scc->kiss.softdcd=arg;
1324 if (arg)
1325 {
1326 or(scc, R15, SYNCIE);
1327 cl(scc, R15, DCDIE);
1328 start_hunt(scc);
1329 } else {
1330 or(scc, R15, DCDIE);
1331 cl(scc, R15, SYNCIE);
1332 }
1333 break;
1334
1335 case PARAM_SPEED:
1336 if (arg < 256)
1337 scc->modem.speed=arg*100;
1338 else
1339 scc->modem.speed=arg;
1340
1341 if (scc->stat.tx_state == 0)
1342 set_speed(scc);
1343 break;
1344
1345 case PARAM_RTS:
1346 if ( !(scc->wreg[R5] & RTS) )
1347 {
1348 if (arg != TX_OFF) {
1349 scc_key_trx(scc, TX_ON);
1350 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
1351 }
1352 } else {
1353 if (arg == TX_OFF)
1354 {
1355 scc->stat.tx_state = TXS_BUSY;
1356 scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
1357 }
1358 }
1359 break;
1360
1361 case PARAM_HWEVENT:
1362 scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
1363 break;
1364
1365 default: return -EINVAL;
1366 }
1367
1368 return 0;
1369 }
1370
1371
1372
1373 static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
1374 {
1375 switch (cmd)
1376 {
1377 case PARAM_TXDELAY: return CAST(scc->kiss.txdelay);
1378 case PARAM_PERSIST: return CAST(scc->kiss.persist);
1379 case PARAM_SLOTTIME: return CAST(scc->kiss.slottime);
1380 case PARAM_TXTAIL: return CAST(scc->kiss.tailtime);
1381 case PARAM_FULLDUP: return CAST(scc->kiss.fulldup);
1382 case PARAM_SOFTDCD: return CAST(scc->kiss.softdcd);
1383 case PARAM_DTR: return CAST((scc->wreg[R5] & DTR)? 1:0);
1384 case PARAM_RTS: return CAST((scc->wreg[R5] & RTS)? 1:0);
1385 case PARAM_SPEED: return CAST(scc->modem.speed);
1386 case PARAM_GROUP: return CAST(scc->kiss.group);
1387 case PARAM_IDLE: return CAST(scc->kiss.idletime);
1388 case PARAM_MIN: return CAST(scc->kiss.mintime);
1389 case PARAM_MAXKEY: return CAST(scc->kiss.maxkeyup);
1390 case PARAM_WAIT: return CAST(scc->kiss.waittime);
1391 case PARAM_MAXDEFER: return CAST(scc->kiss.maxdefer);
1392 case PARAM_TX: return CAST(scc->kiss.tx_inhibit);
1393 default: return NO_SUCH_PARAM;
1394 }
1395
1396 }
1397
1398 #undef CAST
1399
1400
1401
1402
1403
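/* Transmit a calibration pattern: load the pattern into the sync registers
 * R6/R7, key the transmitter, and unkey again when the tx_wdog timer fires
 * after 'duration' seconds.
 */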
1404 static void scc_stop_calibrate(struct timer_list *t)
1405 {
1406 struct scc_channel *scc = from_timer(scc, t, tx_wdog);
1407 unsigned long flags;
1408
1409 spin_lock_irqsave(&scc->lock, flags);
1410 del_timer(&scc->tx_wdog);
1411 scc_key_trx(scc, TX_OFF);
1412 wr(scc, R6, 0);
1413 wr(scc, R7, FLAG);
1414 Outb(scc->ctrl,RES_EXT_INT);
1415 Outb(scc->ctrl,RES_EXT_INT);
1416
1417 netif_wake_queue(scc->dev);
1418 spin_unlock_irqrestore(&scc->lock, flags);
1419 }
1420
1421
1422 static void
1423 scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern)
1424 {
1425 unsigned long flags;
1426
1427 spin_lock_irqsave(&scc->lock, flags);
1428 netif_stop_queue(scc->dev);
1429 scc_discard_buffers(scc);
1430
1431 del_timer(&scc->tx_wdog);
1432
1433 scc->tx_wdog.function = scc_stop_calibrate;
1434 scc->tx_wdog.expires = jiffies + HZ*duration;
1435 add_timer(&scc->tx_wdog);
1436
1437
1438 wr(scc, R6, 0);
1439 wr(scc, R7, pattern);
1440
1441
1442
1443
1444
1445
1446 Outb(scc->ctrl,RES_EXT_INT);
1447 Outb(scc->ctrl,RES_EXT_INT);
1448
1449 scc_key_trx(scc, TX_ON);
1450 spin_unlock_irqrestore(&scc->lock, flags);
1451 }
1460
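/* Reset all configured chips, program their interrupt vector bases
 * (R2 = chip * 16) and enable "vector includes status" (VIS) operation.
 */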
1461 static void z8530_init(void)
1462 {
1463 struct scc_channel *scc;
1464 int chip, k;
1465 unsigned long flags;
1466 char *flag;
1467
1468
1469 printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
1470
1471 flag=" ";
1472 for (k = 0; k < nr_irqs; k++)
1473 if (Ivec[k].used)
1474 {
1475 printk("%s%d", flag, k);
1476 flag=",";
1477 }
1478 printk("\n");
1479
1480
1481
1482 for (chip = 0; chip < Nchips; chip++)
1483 {
1484 scc=&SCC_Info[2*chip];
1485 if (!scc->ctrl) continue;
1486
1487
1488
1489 if(scc->brand & EAGLE)
1490 Outb(scc->special,0x08);
1491
1492 if(scc->brand & (PC100 | PRIMUS))
1493 Outb(scc->special,scc->option);
1494
1495
1496
1497
1498 spin_lock_irqsave(&scc->lock, flags);
1499
1500 Outb(scc->ctrl, 0);
1501 OutReg(scc->ctrl,R9,FHWRES);
1502 udelay(100);
1503 wr(scc, R2, chip*16);
1504 wr(scc, R9, VIS);
1505 spin_unlock_irqrestore(&scc->lock, flags);
1506 }
1507
1508
1509 Driver_Initialized = 1;
1510 }
1511
1512
1513
1514
1515
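/* The network device part: one net_device ("scc0" .. "scc7") is allocated
 * and registered per channel.
 */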
1516 static int scc_net_alloc(const char *name, struct scc_channel *scc)
1517 {
1518 int err;
1519 struct net_device *dev;
1520
1521 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, scc_net_setup);
1522 if (!dev)
1523 return -ENOMEM;
1524
1525 dev->ml_priv = scc;
1526 scc->dev = dev;
1527 spin_lock_init(&scc->lock);
1528 timer_setup(&scc->tx_t, NULL, 0);
1529 timer_setup(&scc->tx_wdog, NULL, 0);
1530
1531 err = register_netdevice(dev);
1532 if (err) {
1533 printk(KERN_ERR "%s: can't register network device (%d)\n",
1534 name, err);
1535 free_netdev(dev);
1536 scc->dev = NULL;
1537 return err;
1538 }
1539
1540 return 0;
1541 }
1542
1543
1544
1545
1546
1547
1548
1549 static const struct net_device_ops scc_netdev_ops = {
1550 .ndo_open = scc_net_open,
1551 .ndo_stop = scc_net_close,
1552 .ndo_start_xmit = scc_net_tx,
1553 .ndo_set_mac_address = scc_net_set_mac_address,
1554 .ndo_get_stats = scc_net_get_stats,
1555 .ndo_siocdevprivate = scc_net_siocdevprivate,
1556 };
1557
1558
1559
1560 static void scc_net_setup(struct net_device *dev)
1561 {
1562 dev->tx_queue_len = 16;
1563
1564 dev->netdev_ops = &scc_netdev_ops;
1565 dev->header_ops = &ax25_header_ops;
1566
1567 dev->flags = 0;
1568
1569 dev->type = ARPHRD_AX25;
1570 dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
1571 dev->mtu = AX25_DEF_PACLEN;
1572 dev->addr_len = AX25_ADDR_LEN;
1573
1574 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1575 dev_addr_set(dev, (u8 *)&ax25_defaddr);
1576 }
1577
1578
1579
1580 static int scc_net_open(struct net_device *dev)
1581 {
1582 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1583
1584 if (!scc->init)
1585 return -EINVAL;
1586
1587 scc->tx_buff = NULL;
1588 skb_queue_head_init(&scc->tx_queue);
1589
1590 init_channel(scc);
1591
1592 netif_start_queue(dev);
1593 return 0;
1594 }
1595
1596
1597
1598 static int scc_net_close(struct net_device *dev)
1599 {
1600 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1601 unsigned long flags;
1602
1603 netif_stop_queue(dev);
1604
1605 spin_lock_irqsave(&scc->lock, flags);
1606 Outb(scc->ctrl,0);
1607 wr(scc,R1,0);
1608 wr(scc,R3,0);
1609 spin_unlock_irqrestore(&scc->lock, flags);
1610
1611 del_timer_sync(&scc->tx_t);
1612 del_timer_sync(&scc->tx_wdog);
1613
1614 scc_discard_buffers(scc);
1615
1616 return 0;
1617 }
1618
1619
1620
1621 static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
1622 {
1623 if (skb->len == 0) {
1624 dev_kfree_skb_irq(skb);
1625 return;
1626 }
1627
1628 scc->dev_stat.rx_packets++;
1629 scc->dev_stat.rx_bytes += skb->len;
1630
1631 skb->protocol = ax25_type_trans(skb, scc->dev);
1632
1633 netif_rx(skb);
1634 }
1635
1636
1637
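/* Transmit a frame.  The first byte of each skb is a KISS command byte:
 * a non-zero command updates a parameter via scc_set_param(); data frames
 * (command 0) are queued -- dropping the oldest entry if the queue is
 * full -- and, if the channel is idle, transmission is kicked off through
 * t_dwait().
 */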
1638 static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
1639 {
1640 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1641 unsigned long flags;
1642 char kisscmd;
1643
1644 if (skb->protocol == htons(ETH_P_IP))
1645 return ax25_ip_xmit(skb);
1646
1647 if (skb->len > scc->stat.bufsize || skb->len < 2) {
1648 scc->dev_stat.tx_dropped++;
1649 dev_kfree_skb(skb);
1650 return NETDEV_TX_OK;
1651 }
1652
1653 scc->dev_stat.tx_packets++;
1654 scc->dev_stat.tx_bytes += skb->len;
1655 scc->stat.txframes++;
1656
1657 kisscmd = *skb->data & 0x1f;
1658 skb_pull(skb, 1);
1659
1660 if (kisscmd) {
1661 scc_set_param(scc, kisscmd, *skb->data);
1662 dev_kfree_skb(skb);
1663 return NETDEV_TX_OK;
1664 }
1665
1666 spin_lock_irqsave(&scc->lock, flags);
1667
1668 if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
1669 struct sk_buff *skb_del;
1670 skb_del = skb_dequeue(&scc->tx_queue);
1671 dev_kfree_skb(skb_del);
1672 }
1673 skb_queue_tail(&scc->tx_queue, skb);
1674 netif_trans_update(dev);
1675
1676
1677
1678
1679
1680
1681
1682
1683 if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
1684 scc->stat.tx_state = TXS_BUSY;
1685 if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
1686 __scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
1687 else
1688 __scc_start_tx_timer(scc, t_dwait, 0);
1689 }
1690 spin_unlock_irqrestore(&scc->lock, flags);
1691 return NETDEV_TX_OK;
1692 }
1706
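/*
 * Private device ioctls: SIOCSCCCFG registers and probes a chip and
 * SIOCSCCINI initializes all chips (both require CAP_SYS_RAWIO and a not
 * yet initialized driver), SIOCSCCCHANINI sets the modem parameters of a
 * channel, and the remaining commands read statistics, set the buffer
 * size, get/set KISS parameters or start a calibration cycle.
 */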
1707 static int scc_net_siocdevprivate(struct net_device *dev,
1708 struct ifreq *ifr, void __user *arg, int cmd)
1709 {
1710 struct scc_kiss_cmd kiss_cmd;
1711 struct scc_mem_config memcfg;
1712 struct scc_hw_config hwcfg;
1713 struct scc_calibrate cal;
1714 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1715 int chan;
1716 unsigned char device_name[IFNAMSIZ];
1717
1718 if (!Driver_Initialized)
1719 {
1720 if (cmd == SIOCSCCCFG)
1721 {
1722 int found = 1;
1723
1724 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1725 if (in_compat_syscall())
1726 return -EOPNOTSUPP;
1727
1728 if (!arg) return -EFAULT;
1729
1730 if (Nchips >= SCC_MAXCHIPS)
1731 return -EINVAL;
1732
1733 if (copy_from_user(&hwcfg, arg, sizeof(hwcfg)))
1734 return -EFAULT;
1735
1736 if (hwcfg.irq == 2) hwcfg.irq = 9;
1737
1738 if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
1739 return -EINVAL;
1740
1741 if (!Ivec[hwcfg.irq].used && hwcfg.irq)
1742 {
1743 if (request_irq(hwcfg.irq, scc_isr,
1744 0, "AX.25 SCC",
1745 (void *)(long) hwcfg.irq))
1746 printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
1747 else
1748 Ivec[hwcfg.irq].used = 1;
1749 }
1750
1751 if (hwcfg.vector_latch && !Vector_Latch) {
1752 if (!request_region(hwcfg.vector_latch, 1, "scc vector latch"))
1753 printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%lx, disabled.\n", hwcfg.vector_latch);
1754 else
1755 Vector_Latch = hwcfg.vector_latch;
1756 }
1757
1758 if (hwcfg.clock == 0)
1759 hwcfg.clock = SCC_DEFAULT_CLOCK;
1760
1761 #ifndef SCC_DONT_CHECK
1762
1763 if(request_region(hwcfg.ctrl_a, 1, "scc-probe"))
1764 {
1765 disable_irq(hwcfg.irq);
1766 Outb(hwcfg.ctrl_a, 0);
1767 OutReg(hwcfg.ctrl_a, R9, FHWRES);
1768 udelay(100);
1769 OutReg(hwcfg.ctrl_a,R13,0x55);
1770 udelay(5);
1771
1772 if (InReg(hwcfg.ctrl_a,R13) != 0x55)
1773 found = 0;
1774 enable_irq(hwcfg.irq);
1775 release_region(hwcfg.ctrl_a, 1);
1776 }
1777 else
1778 found = 0;
1779 #endif
1780
1781 if (found)
1782 {
1783 SCC_Info[2*Nchips ].ctrl = hwcfg.ctrl_a;
1784 SCC_Info[2*Nchips ].data = hwcfg.data_a;
1785 SCC_Info[2*Nchips ].irq = hwcfg.irq;
1786 SCC_Info[2*Nchips+1].ctrl = hwcfg.ctrl_b;
1787 SCC_Info[2*Nchips+1].data = hwcfg.data_b;
1788 SCC_Info[2*Nchips+1].irq = hwcfg.irq;
1789
1790 SCC_ctrl[Nchips].chan_A = hwcfg.ctrl_a;
1791 SCC_ctrl[Nchips].chan_B = hwcfg.ctrl_b;
1792 SCC_ctrl[Nchips].irq = hwcfg.irq;
1793 }
1794
1795
1796 for (chan = 0; chan < 2; chan++)
1797 {
1798 sprintf(device_name, "%s%i", SCC_DriverName, 2*Nchips+chan);
1799
1800 SCC_Info[2*Nchips+chan].special = hwcfg.special;
1801 SCC_Info[2*Nchips+chan].clock = hwcfg.clock;
1802 SCC_Info[2*Nchips+chan].brand = hwcfg.brand;
1803 SCC_Info[2*Nchips+chan].option = hwcfg.option;
1804 SCC_Info[2*Nchips+chan].enhanced = hwcfg.escc;
1805
1806 #ifdef SCC_DONT_CHECK
1807 printk(KERN_INFO "%s: data port = 0x%3.3lx control port = 0x%3.3lx\n",
1808 device_name,
1809 SCC_Info[2*Nchips+chan].data,
1810 SCC_Info[2*Nchips+chan].ctrl);
1811
1812 #else
1813 printk(KERN_INFO "%s: data port = 0x%3.3lx control port = 0x%3.3lx -- %s\n",
1814 device_name,
1815 chan? hwcfg.data_b : hwcfg.data_a,
1816 chan? hwcfg.ctrl_b : hwcfg.ctrl_a,
1817 found? "found" : "missing");
1818 #endif
1819
1820 if (found)
1821 {
1822 request_region(SCC_Info[2*Nchips+chan].ctrl, 1, "scc ctrl");
1823 request_region(SCC_Info[2*Nchips+chan].data, 1, "scc data");
1824 if (Nchips+chan != 0 &&
1825 scc_net_alloc(device_name,
1826 &SCC_Info[2*Nchips+chan]))
1827 return -EINVAL;
1828 }
1829 }
1830
1831 if (found) Nchips++;
1832
1833 return 0;
1834 }
1835
1836 if (cmd == SIOCSCCINI)
1837 {
1838 if (!capable(CAP_SYS_RAWIO))
1839 return -EPERM;
1840
1841 if (Nchips == 0)
1842 return -EINVAL;
1843
1844 z8530_init();
1845 return 0;
1846 }
1847
1848 return -EINVAL;
1849 }
1850
1851 if (!scc->init)
1852 {
1853 if (cmd == SIOCSCCCHANINI)
1854 {
1855 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1856 if (!arg) return -EINVAL;
1857
1858 scc->stat.bufsize = SCC_BUFSIZE;
1859
1860 if (copy_from_user(&scc->modem, arg, sizeof(struct scc_modem)))
1861 return -EINVAL;
1862
1863
1864
1865 if (scc->modem.speed < 4800)
1866 {
1867 scc->kiss.txdelay = 36;
1868 scc->kiss.persist = 42;
1869 scc->kiss.slottime = 16;
1870 scc->kiss.tailtime = 4;
1871 scc->kiss.fulldup = 0;
1872 scc->kiss.waittime = 50;
1873 scc->kiss.maxkeyup = 10;
1874 scc->kiss.mintime = 3;
1875 scc->kiss.idletime = 30;
1876 scc->kiss.maxdefer = 120;
1877 scc->kiss.softdcd = 0;
1878 } else {
1879 scc->kiss.txdelay = 10;
1880 scc->kiss.persist = 64;
1881 scc->kiss.slottime = 8;
1882 scc->kiss.tailtime = 1;
1883 scc->kiss.fulldup = 0;
1884 scc->kiss.waittime = 50;
1885 scc->kiss.maxkeyup = 7;
1886 scc->kiss.mintime = 3;
1887 scc->kiss.idletime = 30;
1888 scc->kiss.maxdefer = 120;
1889 scc->kiss.softdcd = 0;
1890 }
1891
1892 scc->tx_buff = NULL;
1893 skb_queue_head_init(&scc->tx_queue);
1894 scc->init = 1;
1895
1896 return 0;
1897 }
1898
1899 return -EINVAL;
1900 }
1901
1902 switch(cmd)
1903 {
1904 case SIOCSCCRESERVED:
1905 return -ENOIOCTLCMD;
1906
1907 case SIOCSCCSMEM:
1908 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1909 if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
1910 return -EINVAL;
1911 scc->stat.bufsize = memcfg.bufsize;
1912 return 0;
1913
1914 case SIOCSCCGSTAT:
1915 if (!arg || copy_to_user(arg, &scc->stat, sizeof(scc->stat)))
1916 return -EINVAL;
1917 return 0;
1918
1919 case SIOCSCCGKISS:
1920 if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
1921 return -EINVAL;
1922 kiss_cmd.param = scc_get_param(scc, kiss_cmd.command);
1923 if (copy_to_user(arg, &kiss_cmd, sizeof(kiss_cmd)))
1924 return -EINVAL;
1925 return 0;
1926
1927 case SIOCSCCSKISS:
1928 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1929 if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
1930 return -EINVAL;
1931 return scc_set_param(scc, kiss_cmd.command, kiss_cmd.param);
1932
1933 case SIOCSCCCAL:
1934 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1935 if (!arg || copy_from_user(&cal, arg, sizeof(cal)) || cal.time == 0)
1936 return -EINVAL;
1937
1938 scc_start_calibrate(scc, cal.time, cal.pattern);
1939 return 0;
1940
1941 default:
1942 return -ENOIOCTLCMD;
1943
1944 }
1945
1946 return -EINVAL;
1947 }
1948
1949
1950
1951 static int scc_net_set_mac_address(struct net_device *dev, void *addr)
1952 {
1953 struct sockaddr *sa = (struct sockaddr *) addr;
1954 dev_addr_set(dev, sa->sa_data);
1955 return 0;
1956 }
1957
1958
1959
1960 static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
1961 {
1962 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1963
1964 scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
1965 scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
1966 scc->dev_stat.rx_fifo_errors = scc->stat.rx_over;
1967 scc->dev_stat.tx_fifo_errors = scc->stat.tx_under;
1968
1969 return &scc->dev_stat;
1970 }
1971
1972
1973
1974
1975
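/* /proc/net/z8530drv: dump the driver version, hardware configuration,
 * statistics and KISS parameters of every initialized channel.
 */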
1976 #ifdef CONFIG_PROC_FS
1977
1978 static inline struct scc_channel *scc_net_seq_idx(loff_t pos)
1979 {
1980 int k;
1981
1982 for (k = 0; k < Nchips*2; ++k) {
1983 if (!SCC_Info[k].init)
1984 continue;
1985 if (pos-- == 0)
1986 return &SCC_Info[k];
1987 }
1988 return NULL;
1989 }
1990
1991 static void *scc_net_seq_start(struct seq_file *seq, loff_t *pos)
1992 {
1993 return *pos ? scc_net_seq_idx(*pos - 1) : SEQ_START_TOKEN;
1994
1995 }
1996
1997 static void *scc_net_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1998 {
1999 unsigned k;
2000 struct scc_channel *scc = v;
2001 ++*pos;
2002
2003 for (k = (v == SEQ_START_TOKEN) ? 0 : (scc - SCC_Info)+1;
2004 k < Nchips*2; ++k) {
2005 if (SCC_Info[k].init)
2006 return &SCC_Info[k];
2007 }
2008 return NULL;
2009 }
2010
2011 static void scc_net_seq_stop(struct seq_file *seq, void *v)
2012 {
2013 }
2014
2015 static int scc_net_seq_show(struct seq_file *seq, void *v)
2016 {
2017 if (v == SEQ_START_TOKEN) {
2018 seq_puts(seq, "z8530drv-"VERSION"\n");
2019 } else if (!Driver_Initialized) {
2020 seq_puts(seq, "not initialized\n");
2021 } else if (!Nchips) {
2022 seq_puts(seq, "chips missing\n");
2023 } else {
2024 const struct scc_channel *scc = v;
2025 const struct scc_stat *stat = &scc->stat;
2026 const struct scc_kiss *kiss = &scc->kiss;
2027
2037
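/* One record per channel: device name and hardware setup, modem parameters,
 * interrupt counters, frame/error counters and the KISS parameters; with
 * SCC_DEBUG also a dump of the SCC registers.
 */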
2038 seq_printf(seq, "%s\t%3.3lx %3.3lx %d %lu %2.2x %d %3.3lx %3.3lx %d\n",
2039 scc->dev->name,
2040 scc->data, scc->ctrl, scc->irq, scc->clock, scc->brand,
2041 scc->enhanced, Vector_Latch, scc->special,
2042 scc->option);
2043 seq_printf(seq, "\t%lu %d %d %d %d\n",
2044 scc->modem.speed, scc->modem.nrz,
2045 scc->modem.clocksrc, kiss->softdcd,
2046 stat->bufsize);
2047 seq_printf(seq, "\t%lu %lu %lu %lu\n",
2048 stat->rxints, stat->txints, stat->exints, stat->spints);
2049 seq_printf(seq, "\t%lu %lu %d / %lu %lu %d / %d %d\n",
2050 stat->rxframes, stat->rxerrs, stat->rx_over,
2051 stat->txframes, stat->txerrs, stat->tx_under,
2052 stat->nospace, stat->tx_state);
2053
2054 #define K(x) kiss->x
2055 seq_printf(seq, "\t%d %d %d %d %d %d %d %d %d %d %d %d\n",
2056 K(txdelay), K(persist), K(slottime), K(tailtime),
2057 K(fulldup), K(waittime), K(mintime), K(maxkeyup),
2058 K(idletime), K(maxdefer), K(tx_inhibit), K(group));
2059 #undef K
2060 #ifdef SCC_DEBUG
2061 {
2062 int reg;
2063
2064 seq_printf(seq, "\tW ");
2065 for (reg = 0; reg < 16; reg++)
2066 seq_printf(seq, "%2.2x ", scc->wreg[reg]);
2067 seq_printf(seq, "\n");
2068
2069 seq_printf(seq, "\tR %2.2x %2.2x XX ", InReg(scc->ctrl,R0), InReg(scc->ctrl,R1));
2070 for (reg = 3; reg < 8; reg++)
2071 seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
2072 seq_printf(seq, "XX ");
2073 for (reg = 9; reg < 16; reg++)
2074 seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
2075 seq_printf(seq, "\n");
2076 }
2077 #endif
2078 seq_putc(seq, '\n');
2079 }
2080
2081 return 0;
2082 }
2083
2084 static const struct seq_operations scc_net_seq_ops = {
2085 .start = scc_net_seq_start,
2086 .next = scc_net_seq_next,
2087 .stop = scc_net_seq_stop,
2088 .show = scc_net_seq_show,
2089 };
2090 #endif
2091
2092
2093
2094
2095
2096
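/* Module handling: scc_init_driver() registers the first device ("scc0"),
 * which the configuration ioctls operate on before any chip is set up, and
 * creates the /proc entry; scc_cleanup_driver() resets the chips and
 * releases the IRQs, I/O regions and network devices.
 */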
2097 static int __init scc_init_driver (void)
2098 {
2099 char devname[IFNAMSIZ];
2100
2101 printk(banner);
2102
2103 sprintf(devname,"%s0", SCC_DriverName);
2104
2105 rtnl_lock();
2106 if (scc_net_alloc(devname, SCC_Info)) {
2107 rtnl_unlock();
2108 printk(KERN_ERR "z8530drv: cannot initialize module\n");
2109 return -EIO;
2110 }
2111 rtnl_unlock();
2112
2113 proc_create_seq("z8530drv", 0, init_net.proc_net, &scc_net_seq_ops);
2114
2115 return 0;
2116 }
2117
2118 static void __exit scc_cleanup_driver(void)
2119 {
2120 io_port ctrl;
2121 int k;
2122 struct scc_channel *scc;
2123 struct net_device *dev;
2124
2125 if (Nchips == 0 && (dev = SCC_Info[0].dev))
2126 {
2127 unregister_netdev(dev);
2128 free_netdev(dev);
2129 }
2130
2131
2132 local_irq_disable();
2133
2134 for (k = 0; k < Nchips; k++)
2135 if ( (ctrl = SCC_ctrl[k].chan_A) )
2136 {
2137 Outb(ctrl, 0);
2138 OutReg(ctrl,R9,FHWRES);
2139 udelay(50);
2140 }
2141
2142
2143 for (k = 0; k < nr_irqs ; k++)
2144 if (Ivec[k].used) free_irq(k, NULL);
2145
2146 local_irq_enable();
2147
2148
2149 for (k = 0; k < Nchips*2; k++)
2150 {
2151 scc = &SCC_Info[k];
2152 if (scc->ctrl)
2153 {
2154 release_region(scc->ctrl, 1);
2155 release_region(scc->data, 1);
2156 }
2157 if (scc->dev)
2158 {
2159 unregister_netdev(scc->dev);
2160 free_netdev(scc->dev);
2161 }
2162 }
2163
2164
2165 if (Vector_Latch)
2166 release_region(Vector_Latch, 1);
2167
2168 remove_proc_entry("z8530drv", init_net.proc_net);
2169 }
2170
2171 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
2172 MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
2173 MODULE_LICENSE("GPL");
2174 module_init(scc_init_driver);
2175 module_exit(scc_cleanup_driver);