0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define HWMTM
0014
0015 #ifndef FDDI
0016 #define FDDI
0017 #endif
0018
0019 #include "h/types.h"
0020 #include "h/fddi.h"
0021 #include "h/smc.h"
0022 #include "h/supern_2.h"
0023 #include "h/skfbiinc.h"
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040 #ifdef COMMON_MB_POOL
0041 static SMbuf *mb_start;
0042 static SMbuf *mb_free;
0043 static int mb_init = FALSE ;
0044 static int call_count;
0045 #endif
0046
0047
0048
0049
0050
0051
0052
0053 #ifdef DEBUG
0054 #ifndef DEBUG_BRD
0055 extern struct smt_debug debug ;
0056 #endif
0057 #endif
0058
0059 #ifdef NDIS_OS2
0060 extern u_char offDepth ;
0061 extern u_char force_irq_pending ;
0062 #endif
0063
0064
0065
0066
0067
0068
0069
0070 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
0071 static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
0072 static void init_txd_ring(struct s_smc *smc);
0073 static void init_rxd_ring(struct s_smc *smc);
0074 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
0075 static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start,
0076 int count);
0077 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
0078 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
0079 static SMbuf* get_llc_rx(struct s_smc *smc);
0080 static SMbuf* get_txd_mb(struct s_smc *smc);
0081 static void mac_drv_clear_txd(struct s_smc *smc);
0082
0083
0084
0085
0086
0087
0088
0089
0090 extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
0091 extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
0092 extern void mac_drv_fill_rxd(struct s_smc *smc);
0093 extern void mac_drv_tx_complete(struct s_smc *smc,
0094 volatile struct s_smt_fp_txd *txd);
0095 extern void mac_drv_rx_complete(struct s_smc *smc,
0096 volatile struct s_smt_fp_rxd *rxd,
0097 int frag_count, int len);
0098 extern void mac_drv_requeue_rxd(struct s_smc *smc,
0099 volatile struct s_smt_fp_rxd *rxd,
0100 int frag_count);
0101 extern void mac_drv_clear_rxd(struct s_smc *smc,
0102 volatile struct s_smt_fp_rxd *rxd, int frag_count);
0103
0104 #ifdef USE_OS_CPY
0105 extern void hwm_cpy_rxd2mb(void);
0106 extern void hwm_cpy_txd2mb(void);
0107 #endif
0108
0109 #ifdef ALL_RX_COMPLETE
0110 extern void mac_drv_all_receives_complete(void);
0111 #endif
0112
0113 extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
0114 extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);
0115
0116 #ifdef NDIS_OS2
0117 extern void post_proc(void);
0118 #else
0119 extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
0120 int flag);
0121 #endif
0122
0123 extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
0124 int la_len);
0125
0126
0127
0128
0129
0130
0131 void process_receive(struct s_smc *smc);
0132 void fddi_isr(struct s_smc *smc);
0133 void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
0134 void init_driver_fplus(struct s_smc *smc);
0135 void mac_drv_rx_mode(struct s_smc *smc, int mode);
0136 void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
0137 void mac_drv_clear_tx_queue(struct s_smc *smc);
0138 void mac_drv_clear_rx_queue(struct s_smc *smc);
0139 void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
0140 int frame_status);
0141 void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
0142 int frame_status);
0143
0144 int mac_drv_init(struct s_smc *smc);
0145 int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
0146 int frame_status);
0147
0148 u_int mac_drv_check_space(void);
0149
0150 SMbuf* smt_get_mbuf(struct s_smc *smc);
0151
0152 #ifdef DEBUG
0153 void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
0154 #endif
0155
0156
0157
0158
0159
0160
0161 #ifndef UNUSED
0162 #ifdef lint
0163 #define UNUSED(x) (x) = (x)
0164 #else
0165 #define UNUSED(x)
0166 #endif
0167 #endif
0168
0169 #ifdef USE_CAN_ADDR
0170 #define MA smc->hw.fddi_canon_addr.a
0171 #define GROUP_ADDR_BIT 0x01
0172 #else
0173 #define MA smc->hw.fddi_home_addr.a
0174 #define GROUP_ADDR_BIT 0x80
0175 #endif
0176
0177 #define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
0178 SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)
0179
0180 #ifdef MB_OUTSIDE_SMC
0181 #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
0182 MAX_MBUF*sizeof(SMbuf))
0183 #define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
0184 #else
0185 #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
0186 #endif
0187
0188
0189
0190
0191 #if defined(NDIS_OS2) || defined(ODI2)
0192 #define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
0193 #else
0194 #define CR_READ(var) (__le32)(var)
0195 #endif
0196
0197 #define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
0198 IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
0199 IS_R1_C | IS_XA_C | IS_XS_C)
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221 u_int mac_drv_check_space(void)
0222 {
0223 #ifdef MB_OUTSIDE_SMC
0224 #ifdef COMMON_MB_POOL
0225 call_count++ ;
0226 if (call_count == 1) {
0227 return EXT_VIRT_MEM;
0228 }
0229 else {
0230 return EXT_VIRT_MEM_2;
0231 }
0232 #else
0233 return EXT_VIRT_MEM;
0234 #endif
0235 #else
0236 return 0;
0237 #endif
0238 }
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
/*
 * Check descriptor size constraints and allocate the descriptor area
 * and (when configured outside the SMC context) the SMbuf pool.
 *
 * Returns 0 on success, 1 if the required memory could not be obtained.
 */
int mac_drv_init(struct s_smc *smc)
{
	/* the BMU requires RxD/TxD structures to be multiples of 16 bytes */
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get and initialize the descriptor memory; one extra TxD-sized
	 * slot is requested so the area can later be aligned to 16 bytes
	 * (see init_fddi_driver).
	 */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space: the hw module can't work */
	}

	/*
	 * get and initialize the SMbuf pool memory
	 */
#ifndef	MB_OUTSIDE_SMC
	/* pool is a static array embedded in the SMC context */
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	/* one private pool per adapter */
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space: the hw module can't work */
	}
#else
	/* single pool shared by all adapters; allocate it only once */
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space: the hw module can't work */
		}
	}
#endif
#endif
	return 0;
}
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
/*
 * Pre-compute the FORMAC+ mode/feature register initialization values
 * that are loaded into the chip when it is (re)initialized.
 */
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	/* PCI boards additionally check and generate bus parity */
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping for canonical address handling */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
0315
/*
 * Link 'count' descriptors starting at 'start' into a circular ring:
 * each descriptor gets the virtual and physical address of its
 * successor, the last one points back to 'start'.
 *
 * Returns the physical address of the first descriptor of the ring
 * (i.e. the value stored in the last descriptor's next-address field).
 */
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long phys ;

	DB_GEN(3, "descr ring starts at = %p", start);
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;	/* d2 is chained to its successor d1 */
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN(3, "descr ring ends at = %p", d1);
	/* close the ring: last descriptor points back to the first */
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	/* make the whole ring visible to the device */
	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}
0346
/*
 * Build the asynchronous (QUEUE_A0) and synchronous (QUEUE_S) transmit
 * descriptor rings and program the BMU descriptor address registers.
 */
static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long phys ;

	/*
	 * The TxDs are located directly behind the SMT_R1_RXD_COUNT RxDs
	 * in the common descriptor area.
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;
	/*
	 * the first descriptor's next-address field holds the physical
	 * address handed to the BMU; put/get start at the second TxD
	 */
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;	/* restore ds so the sync ring offset below is correct */
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;	/* load async BMU descr address */

	/* the sync ring follows immediately after the async ring */
	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;	/* load sync BMU descr address */
}
0383
/*
 * Build the receive descriptor ring for queue R1 and program the BMU
 * receive descriptor address register.
 */
static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long phys ;

	/*
	 * the RxDs are located at the very beginning of the common
	 * descriptor area
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;
	/* BMU start address comes from the first RxD's next-address field */
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;	/* put/get start at the second RxD */
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;	/* load BMU descr address register */
}
0405
0406
0407
0408
0409
0410
0411
0412
0413
/*
 * One-time initialization of the hardware module: initialize the board
 * and FORMAC+, fill the SMbuf free list, reset all hwm bookkeeping,
 * align and build the descriptor rings, and finally start the PLC.
 */
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf *mb ;
	int i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT: put every buffer onto the
	 * free list via smt_free_mbuf()
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	/* the shared pool is only initialized by the first adapter */
	mb = mb_start ;
	if (!mb_init) {
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the hardware module bookkeeping
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure the descriptor area is aligned on a 16 byte boundary
	 * (mac_drv_init allocated one spare TxD slot for this purpose)
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN(3, "i = %d", i);
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}
0476
0477
0478 SMbuf *smt_get_mbuf(struct s_smc *smc)
0479 {
0480 register SMbuf *mb ;
0481
0482 #ifndef COMMON_MB_POOL
0483 mb = smc->os.hwm.mbuf_pool.mb_free ;
0484 #else
0485 mb = mb_free ;
0486 #endif
0487 if (mb) {
0488 #ifndef COMMON_MB_POOL
0489 smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
0490 #else
0491 mb_free = mb->sm_next ;
0492 #endif
0493 mb->sm_off = 8 ;
0494 mb->sm_use_count = 1 ;
0495 }
0496 DB_GEN(3, "get SMbuf: mb = %p", mb);
0497 return mb;
0498 }
0499
0500 void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
0501 {
0502
0503 if (mb) {
0504 mb->sm_use_count-- ;
0505 DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
0506
0507
0508
0509
0510
0511 if (!mb->sm_use_count) {
0512 DB_GEN(3, "free SMbuf: mb = %p", mb);
0513 #ifndef COMMON_MB_POOL
0514 mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
0515 smc->os.hwm.mbuf_pool.mb_free = mb ;
0516 #else
0517 mb->sm_next = mb_free ;
0518 mb_free = mb ;
0519 #endif
0520 }
0521 }
0522 else
0523 SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
0524 }
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
/*
 * Repair all descriptor rings after the hardware has been stopped:
 * clear stale BMU_OWN bits, reload the BMU descriptor address
 * registers, and restart any queue that still has work pending.
 *
 * Must only be called while the hardware is in the STOPPED state.
 */
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start the BMU if no TxDs are in use
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair the receive queue and always restart it
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}
0577
/*
 * Repair a transmit descriptor ring after a hardware stop: walk the
 * in-use TxDs and clear BMU_OWN on descriptors the BMU had claimed but
 * not started (no BMU_STF), so the host regains them.
 *
 * Returns the physical address of the descriptor at which the BMU
 * should resume.
 */
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	/*
	 * default resume address: walk the full ring (tx_used + tx_free
	 * descriptors) to reach the predecessor of tx_curr_get
	 */
	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				break ;	/* BMU already started here: resume point found */
			}
			else {
				/*
				 * repair the descriptor: give it back
				 * to the host
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
/*
 * Repair the receive descriptor ring after a hardware stop: walk the
 * in-use RxDs and clear BMU_OWN on descriptors the BMU had claimed but
 * not started (no BMU_STF), so the host regains them.
 *
 * Returns the physical address of the descriptor at which the BMU
 * should resume.
 */
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	/*
	 * default resume address: step SMT_R1_RXD_COUNT-1 times around
	 * the ring to reach the predecessor of rx_curr_get
	 */
	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				break ;	/* BMU already started here: resume point found */
			}
			else {
				/*
				 * repair the descriptor: give it back
				 * to the host
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
/*
 * FDDI interrupt service routine: reads the interrupt summary register
 * in a loop and dispatches every pending source (PLC, FORMAC+, timer,
 * BMU tx/rx completion) until no interrupt remains, then drains queued
 * LLC receive frames and the SMT event queue.
 */
void fddi_isr(struct s_smc *smc)
{
	u_long is ;	/* interrupt summary register value */
	u_short stu, stl ;
	SMbuf *mb ;

#ifdef	USE_BREAK_ISR
	int force_irq ;
#endif

#ifdef	ODI2
	/* resume a receive process that was broken off earlier */
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN(7, "ISA = 0x%lx", is);

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 interrupt */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 interrupt */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ status reg 1 */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN(6, "Slow transmit complete");
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ status reg 2 */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN(6, "Slow receive complete");
				DB_GEN(7, "stl = %x : stu = %x", stl, stu);
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ status reg 3 */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN(6, "FORMAC Mode Register 3");
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* timer interrupt */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out-of-RxD detection: if several timer
				 * ticks pass without receive activity,
				 * kick the receive path manually
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* restricted token monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* receive parity error */
				/* clear IRQ, then report the fault */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* receive check condition */
				/* clear IRQ, then report the fault */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* async tx check condition */
				/* clear IRQ, then report the fault */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* sync tx check condition */
				/* clear IRQ, then report the fault */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * fast transmit complete (sync and/or async queue)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN(6, "Fast tx complete queue");
			/*
			 * clear both tx IRQs, then reclaim finished TxDs
			 * and let the LLC layer continue transmitting
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * fast receive complete
		 */
		if (is & IS_R1_F) {
			DB_GEN(6, "Fast receive complete");
			/* receive */
#ifndef USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

		/* hand all queued LLC rx frames up to the LLC layer */
#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		/* dispatch pending SMT events */
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {	/* leave the ISR, if the upcall queue is full */
			break ;
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;	/* leave the ISR on request */
		}
#endif

		/* NOTE: when the ISR is left, no interrupt is lost:
		 * the summary register is re-read at the loop head */
	}

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}
0883
0884
0885
0886
0887
0888
0889
0890
0891 #ifndef NDIS_OS2
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984 void mac_drv_rx_mode(struct s_smc *smc, int mode)
0985 {
0986 switch(mode) {
0987 case RX_ENABLE_PASS_SMT:
0988 smc->os.hwm.pass_SMT = TRUE ;
0989 break ;
0990 case RX_DISABLE_PASS_SMT:
0991 smc->os.hwm.pass_SMT = FALSE ;
0992 break ;
0993 case RX_ENABLE_PASS_NSA:
0994 smc->os.hwm.pass_NSA = TRUE ;
0995 break ;
0996 case RX_DISABLE_PASS_NSA:
0997 smc->os.hwm.pass_NSA = FALSE ;
0998 break ;
0999 case RX_ENABLE_PASS_DB:
1000 smc->os.hwm.pass_DB = TRUE ;
1001 break ;
1002 case RX_DISABLE_PASS_DB:
1003 smc->os.hwm.pass_DB = FALSE ;
1004 break ;
1005 case RX_DISABLE_PASS_ALL:
1006 smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
1007 smc->os.hwm.pass_DB = FALSE ;
1008 smc->os.hwm.pass_llc_promisc = TRUE ;
1009 mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
1010 break ;
1011 case RX_DISABLE_LLC_PROMISC:
1012 smc->os.hwm.pass_llc_promisc = FALSE ;
1013 break ;
1014 case RX_ENABLE_LLC_PROMISC:
1015 smc->os.hwm.pass_llc_promisc = TRUE ;
1016 break ;
1017 case RX_ENABLE_ALLMULTI:
1018 case RX_DISABLE_ALLMULTI:
1019 case RX_ENABLE_PROMISC:
1020 case RX_DISABLE_PROMISC:
1021 case RX_ENABLE_NSA:
1022 case RX_DISABLE_NSA:
1023 default:
1024 mac_set_rx_mode(smc,mode) ;
1025 break ;
1026 }
1027 }
1028 #endif
1029
1030
1031
1032
/*
 * Process all completed frames in receive queue R1: walk the RxD ring
 * collecting the fragments of each frame, validate the frame status,
 * filter by destination address and frame class, and deliver the frame
 * to the LLC layer and/or the SMT. Bad or unwanted frames have their
 * RxDs requeued for reuse.
 */
void process_receive(struct s_smc *smc)
{
	int i ;
	int n ;	/* accumulated fragment byte count of the frame */
	int frag_count ;	/* number of RxDs of the current receive buffer */
	int used_frags ;	/* number of RxDs carrying actual frame data */
	struct s_smt_rx_queue *queue ;
	struct s_smt_fp_rxd volatile *r ;	/* walking RxD pointer */
	struct s_smt_fp_rxd volatile *rxd ;	/* first RxD of the frame */
	u_long rbctrl ;	/* receive buffer control word */
	u_long rfsw ;	/* receive frame status word */
	u_short rx_used ;
	u_char far *virt ;
	char far *data ;
	SMbuf *mb ;
	u_char fc ;	/* frame control byte */
	int len ;	/* frame length */

	smc->os.hwm.detec_count = 0 ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	NDD_TRACE("RHxB",0,0,0) ;
	for ( ; ; ) {
		r = queue->rx_curr_get ;
		rx_used = queue->rx_used ;
		frag_count = 0 ;

#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			goto rx_end ;
		}
#endif
#ifdef	NDIS_OS2
		/* upcall queue full: remember to resume later */
		if (offDepth) {
			smc->os.hwm.rx_break = 1 ;
			goto rx_end ;
		}
		smc->os.hwm.rx_break = 0 ;
#endif
#ifdef	ODI2
		if (smc->os.hwm.rx_break) {
			goto rx_end ;
		}
#endif
		n = 0 ;
		/*
		 * collect the fragments of one frame: stop at the RxD
		 * with the EOF bit, or bail out when the BMU still owns
		 * a descriptor (frame not complete yet)
		 */
		do {
			DB_RX(5, "Check RxD %p for OWN and EOF", r);
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

			if (rbctrl & BMU_OWN) {
				/* NOTE(review): rfsw may still be unset
				 * here on the first iteration — trace
				 * value then undefined; confirm intent */
				NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
				DB_RX(4, "End of RxDs");
				goto rx_end ;
			}
			/*
			 * out of RxD detection: the host owns a
			 * descriptor although none is marked used
			 */
			if (!rx_used) {
				SK_BREAK() ;
				SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
				/*
				 * ring inconsistency: reset the receive
				 * queue and start over
				 */
				smc->hw.hw_state = STOPPED ;
				mac_drv_clear_rx_queue(smc) ;
				smc->hw.hw_state = STARTED ;
				mac_drv_fill_rxd(smc) ;
				smc->os.hwm.detec_count = 0 ;
				goto rx_end ;
			}
			rfsw = le32_to_cpu(r->rxd_rfsw) ;
			if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
				/*
				 * The BMU_STF bit is deleted, 1 frame is
				 * placed into more than 1 rx buffer.
				 * Skip frames which are too long or which
				 * end without EOF; treat the status word
				 * as invalid.
				 */
				SK_BREAK() ;
				rfsw = 0 ;
				if (frag_count) {
					break ;
				}
			}
			n += rbctrl & 0xffff ;	/* add fragment length */
			r = r->rxd_next ;
			frag_count++ ;
			rx_used-- ;
		} while (!(rbctrl & BMU_EOF)) ;
		used_frags = frag_count ;
		DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);

		/*
		 * skip additional empty fragments up to the next RxD
		 * with the start-of-frame bit set
		 */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
			rx_used-- ;
		}
		DB_RX(5, "STF bit found");

		/*
		 * release the consumed RxDs from the queue bookkeeping;
		 * rxd keeps the first RxD of the frame for delivery
		 */
		rxd = queue->rx_curr_get ;
		queue->rx_curr_get = r ;
		queue->rx_free += frag_count ;
		queue->rx_used = rx_used ;

		/*
		 * clear the STF bit so the descriptor is not seen as a
		 * frame start again
		 */
		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		smc->hw.fp.err_stats.err_valid++ ;
		smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

		/* the length field includes the 4-byte CRC */
		len = (rfsw & RD_LENGTH) - 4 ;

		DB_RX(4, "frame length = %d", len);

		/*
		 * check the frame status word for receive errors
		 */
		if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
			if (rfsw & RD_S_MSRABT) {
				DB_RX(2, "Frame aborted by the FORMAC");
				smc->hw.fp.err_stats.err_abort++ ;
			}
			/*
			 * count the different receive error conditions
			 */
			if (rfsw & RD_S_SEAC2) {
				DB_RX(2, "E-Indicator set");
				smc->hw.fp.err_stats.err_e_indicator++ ;
			}
			if (rfsw & RD_S_SFRMERR) {
				DB_RX(2, "CRC error");
				smc->hw.fp.err_stats.err_crc++ ;
			}
			if (rfsw & RX_FS_IMPL) {
				DB_RX(2, "Implementer frame");
				smc->hw.fp.err_stats.err_imp_frame++ ;
			}
			goto abort_frame ;
		}
		if (len > FDDI_RAW_MTU-4) {
			DB_RX(2, "Frame too long error");
			smc->hw.fp.err_stats.err_too_long++ ;
			goto abort_frame ;
		}

		/*
		 * drop frames that carry no payload beyond the FC byte
		 */
		if (len <= 4) {
			DB_RX(2, "Frame length = 0");
			goto abort_frame ;
		}

		/* cross-check against the accumulated fragment bytes */
		if (len != (n-4)) {
			DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
			smc->os.hwm.rx_len_error++ ;
			goto abort_frame ;
		}

		/*
		 * drop frames whose source address is our own MAC
		 * address (echoes of our own transmissions)
		 */
		virt = (u_char far *) rxd->rxd_virt ;
		DB_RX(2, "FC = %x", *virt);
		if (virt[12] == MA[5] &&
		    virt[11] == MA[4] &&
		    virt[10] == MA[3] &&
		    virt[9] == MA[2] &&
		    virt[8] == MA[1] &&
		    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
			goto abort_frame ;
		}

		/*
		 * LLC frame received
		 */
		if (rfsw & RX_FS_LLC) {
			/*
			 * unless promiscuous LLC reception is enabled,
			 * only pass frames whose destination address is
			 * our own, or a multicast/broadcast address
			 */
			if (!smc->os.hwm.pass_llc_promisc) {
				if(!(virt[1] & GROUP_ADDR_BIT)) {
					if (virt[6] != MA[5] ||
					    virt[5] != MA[4] ||
					    virt[4] != MA[3] ||
					    virt[3] != MA[2] ||
					    virt[2] != MA[1] ||
					    virt[1] != MA[0]) {
						DB_RX(2, "DA != MA and not multi- or broadcast");
						goto abort_frame ;
					}
				}
			}

			/*
			 * LLC frame: pass the RxDs up to the OS-specific
			 * module for delivery
			 */
			DB_RX(4, "LLC - receive");
			mac_drv_rx_complete(smc,rxd,frag_count,len) ;
		}
		else {
			/* SMT/MAC frame: copy it into an SMbuf */
			if (!(mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_RX(4, "No SMbuf; receive terminated");
				goto abort_frame ;
			}
			data = smtod(mb,char *) - 1 ;

			/*
			 * copy the frame (FC byte included) into the
			 * SMbuf, fragment by fragment
			 */
#ifdef USE_OS_CPY
			hwm_cpy_rxd2mb(rxd,data,len) ;
#else
			for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
				n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
				DB_RX(6, "cp SMT frame to mb: len = %d", n);
				memcpy(data,r->rxd_virt,n) ;
				data += n ;
			}
			data = smtod(mb,char *) - 1 ;
#endif
			fc = *(char *)mb->sm_data = *data ;
			mb->sm_len = len - 1 ;	/* length without FC */
			data++ ;

			/*
			 * dispatch on the frame class
			 */
			switch(fc) {
			case FC_SMT_INFO :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				if (smc->os.hwm.pass_SMT) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_SMT_NSA :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				/*
				 * pass NSA frames if enabled, or if SMT
				 * frames are passed and the A indicator
				 * is not set
				 */
				if (smc->os.hwm.pass_NSA ||
					(smc->os.hwm.pass_SMT &&
					!(rfsw & A_INDIC))) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_BEACON :
				if (smc->os.hwm.pass_DB) {
					DB_RX(5, "pass DB frame");
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}
				smt_free_mbuf(smc,mb) ;
				break ;
			default :
				/*
				 * unknown FC: count it as MAC or
				 * implementer frame and drop it
				 */
				DB_RX(2, "unknown FC error");
				smt_free_mbuf(smc,mb) ;
				DB_RX(5, "requeue RxD");
				mac_drv_requeue_rxd(smc,rxd,frag_count) ;
				if ((fc & 0xf0) == FC_MAC)
					smc->hw.fp.err_stats.err_mac_frame++ ;
				else
					smc->hw.fp.err_stats.err_imp_frame++ ;

				break ;
			}
		}

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

		continue ;
		/*--------------------------------------------------------*/
abort_frame:
		DB_RX(5, "requeue RxD");
		mac_drv_requeue_rxd(smc,rxd,frag_count) ;

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
	}
rx_end:
#ifdef	ALL_RX_COMPLETE
	mac_drv_all_receives_complete(smc) ;
#endif
	return ;	/* lint bug: needs return detect end of function */
}
1371
1372 static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
1373 {
1374 u_char fc ;
1375
1376 DB_RX(4, "send a queued frame to the llc layer");
1377 smc->os.hwm.r.len = mb->sm_len ;
1378 smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
1379 fc = *smc->os.hwm.r.mb_pos ;
1380 (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
1381 smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
1382 smt_free_mbuf(smc,mb) ;
1383 }
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
/*
 * Hand one receive buffer fragment to the BMU: fill the next RxD of
 * queue R1 with the buffer addresses and control word, flush it, and
 * (re)start the receive BMU.
 */
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32	rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	/*
	 * control word: FIRST/LAST fragment bits shifted into their
	 * hardware positions, ownership given to the BMU, and the
	 * buffer length
	 */
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	/* flush before starting the BMU so it sees the new descriptor */
	DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
/*
 * Release all receive buffers currently queued in R1: reclaim every
 * in-use RxD from the BMU (clear BMU_OWN), complete the DMA mappings,
 * and return the buffers to the OS-specific module.
 *
 * Must only be called while the hardware is in the STOPPED state.
 */
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int frag_count ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}

	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX(5, "clear_rx_queue");

	/*
	 * process one receive buffer (i.e. one run of RxDs up to the
	 * next start-of-frame descriptor) per outer loop iteration
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX(5, "switch OWN bit of RxD 0x%p", r);
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		/* collect the remaining fragments of this buffer */
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX(5, "STF bit found");
		next_rxd = r ;

		/* complete the DMA mapping of every reclaimed RxD */
		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}

		DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
		      queue->rx_curr_get, frag_count);
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
/*
 * Begin the transmission of a frame: select the transmit queue from
 * frame_status, pre-compute the transmit descriptor word, and decide
 * where the frame goes (LAN and/or local SMT) based on its frame
 * control byte, ring state, and TxD availability.
 *
 * Returns the updated frame_status (LAN_TX/LOC_TX set or cleared,
 * possibly RING_DOWN or OUT_OF_TXD added).
 */
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;
	/* QUEUE_A0 bit of frame_status selects the async vs sync queue */
	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	/* descriptor word carries the last-longword byte count in bits 27..28 */
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	smc->os.hwm.tx_len = frame_len ;
	DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
		/* any LLC frame goes out on the LAN */
		frame_status |= LAN_TX ;
	}
	else {
		switch (fc) {
		case FC_SMT_INFO :
		case FC_SMT_NSA :
			frame_status |= LAN_TX ;
			break ;
		case FC_SMT_LOC :
			frame_status |= LOC_TX ;
			break ;
		case FC_SMT_LAN_LOC :
			frame_status |= LAN_TX | LOC_TX ;
			break ;
		default :
			SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
		}
	}
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX(2, "Ring is down: terminate LAN_TX");
	}
	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		/* try to reclaim completed TxDs before giving up */
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX(2, "Out of TxDs, terminate LAN_TX");
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX(2, "Out of TxDs, terminate LAN_TX");
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}
	DB_TX(3, "frame_status = %x", frame_status);
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return frame_status;
}
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
/*
 * Hand one fragment of the current transmit frame to the hardware
 * module.  Must be called once per fragment after hwm_tx_init() has
 * set up smc->os.hwm.tx_p / tx_descr / tx_len for this frame.
 *
 * smc		points to the SMT context
 * virt		virtual address of the fragment data
 * phys		physical (DMA) address of the fragment data
 * len		fragment length in bytes
 * frame_status	status from hwm_tx_init() (LAN_TX/LOC_TX plus the
 *		FIRST_FRAG/LAST_FRAG/EN_IRQ_EOF flags set by the caller)
 */
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;

	/*
	 * Fill the TxD at the current put position of the queue selected
	 * by hwm_tx_init().
	 */
	t = queue->tx_curr_put ;

	DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
	if (frame_status & LAN_TX) {
		/* hand the fragment to the BMU via a transmit descriptor */
		DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		/*
		 * Buffer control word: fragment position flags shifted
		 * into the control bits, ownership (BMU_OWN) and length.
		 * It is written last so the descriptor is complete before
		 * the hardware may claim it.
		 */
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;	/* kick the tx BMU */
#else
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		/* start the transmit BMU of the addressed queue */
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX(3, "LOC_TX:");
		if (frame_status & FIRST_FRAG) {
			/*
			 * On the first fragment allocate an SMbuf that
			 * collects the whole frame for the local SMT.
			 */
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX(4, "No SMbuf; transmit terminated");
			}
			else {
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef	USE_OS_CPY
#ifdef	PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX(3, "copy fragment into MBuf");
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef	PASS_1ST_TXD_2_TX_COMP
				/*
				 * copy the complete frame via the OS copy
				 * routine once the last fragment arrives
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
				/*
				 * Move the byte in front of the data area
				 * to sm_data[0] and shorten the frame by
				 * one byte, then pass the mbuf to SMT as
				 * a locally received frame.
				 */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX(3, "pass LLC frame to SMT");
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
1721
1722
1723
1724
1725
1726 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
1727 {
1728 DB_GEN(4, "queue_llc_rx: mb = %p", mb);
1729 smc->os.hwm.queued_rx_frames++ ;
1730 mb->sm_next = (SMbuf *)NULL ;
1731 if (smc->os.hwm.llc_rx_pipe == NULL) {
1732 smc->os.hwm.llc_rx_pipe = mb ;
1733 }
1734 else {
1735 smc->os.hwm.llc_rx_tail->sm_next = mb ;
1736 }
1737 smc->os.hwm.llc_rx_tail = mb ;
1738
1739
1740
1741
1742 if (!smc->os.hwm.isr_flag) {
1743 smt_force_irq(smc) ;
1744 }
1745 }
1746
1747
1748
1749
1750 static SMbuf *get_llc_rx(struct s_smc *smc)
1751 {
1752 SMbuf *mb ;
1753
1754 if ((mb = smc->os.hwm.llc_rx_pipe)) {
1755 smc->os.hwm.queued_rx_frames-- ;
1756 smc->os.hwm.llc_rx_pipe = mb->sm_next ;
1757 }
1758 DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
1759 return mb;
1760 }
1761
1762
1763
1764
1765
1766 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
1767 {
1768 DB_GEN(4, "_rx: queue_txd_mb = %p", mb);
1769 smc->os.hwm.queued_txd_mb++ ;
1770 mb->sm_next = (SMbuf *)NULL ;
1771 if (smc->os.hwm.txd_tx_pipe == NULL) {
1772 smc->os.hwm.txd_tx_pipe = mb ;
1773 }
1774 else {
1775 smc->os.hwm.txd_tx_tail->sm_next = mb ;
1776 }
1777 smc->os.hwm.txd_tx_tail = mb ;
1778 }
1779
1780
1781
1782
1783 static SMbuf *get_txd_mb(struct s_smc *smc)
1784 {
1785 SMbuf *mb ;
1786
1787 if ((mb = smc->os.hwm.txd_tx_pipe)) {
1788 smc->os.hwm.queued_txd_mb-- ;
1789 smc->os.hwm.txd_tx_pipe = mb->sm_next ;
1790 }
1791 DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
1792 return mb;
1793 }
1794
1795
1796
1797
/*
 * Send an SMT frame held in an SMbuf.  Depending on the frame control
 * byte and the pass_NSA/pass_SMT settings the frame is sent to the LAN
 * (via the async transmit queue), delivered to the local SMT, or both.
 *
 * smc	points to the SMT context
 * mb	the SMbuf holding the frame (ownership passes to this function;
 *	it is freed on transmit completion or on failure)
 * fc	frame control byte of the frame
 */
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);

	/* prepend the frame control byte to the mbuf data */
	mb->sm_off-- ;
	mb->sm_len++ ;
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;	/* local frames carry FC_SMT_INFO in the data */

	/*
	 * Split the frame into DMA fragments at SMT_PAGESIZE boundaries;
	 * virt[]/frag_len[] can hold at most 3 fragments.
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/* decide whether the frame goes to the LAN, the local SMT or both */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
			(smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	/* ring down or not enough TxDs: cancel the LAN part */
	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX(2, "Ring is down: terminate LAN_TX");
		}
		else {
			/* nothing left to do: drop the frame */
			DB_TX(2, "Ring is down: terminate transmission");
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX(5, "frame_status = 0x%x", frame_status);

	/* the mbuf is consumed twice when sent to LAN and local SMT */
	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX(5, "init TxD = 0x%p", t);
			if (i == frag_count-1) {
				/* last fragment: set frame descriptor word */
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			/*
			 * control word written last: BMU_SMT_TX marks the
			 * descriptor so completion frees the SMbuf
			 */
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		/* mbuf is freed by mac_drv_clear_txd() on completion */
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX(5, "pass Mbuf to LLC queue");
		queue_llc_rx(smc,mb) ;
	}

	/* reclaim any already-completed transmit descriptors */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
/*
 * Release all completed transmit descriptors of both transmit queues.
 * For each fully transmitted frame (scanned up to its BMU_EOF
 * descriptor) the DMA mappings are released; SMT frames (BMU_SMT_TX)
 * have their queued SMbuf freed, LLC frames are reported to the OS via
 * mac_drv_tx_complete().  Scanning a queue stops at the first TxD the
 * hardware still owns (BMU_OWN) or when no TxDs are in use.
 */
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);

		for ( ; ; ) {
			frag_count = 0 ;

			/* scan one complete frame up to its BMU_EOF TxD */
			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					/* hardware still owns it: stop here */
					DB_TX(4, "End of TxDs queue %d", i);
					goto free_next_queue ;
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			/* release the DMA mappings of the frame's TxDs */
			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;	/* t2 = last TxD of the frame */
				t1 = t1->txd_next ;
			}

			if (tbctrl & BMU_SMT_TX) {
				/* SMT frame: free the SMbuf queued at send */
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef	PASS_1ST_TXD_2_TX_COMP
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
				mac_drv_tx_complete(smc,t2) ;
#else
				/* pass the first TxD of the frame instead */
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
					queue->tx_curr_get);
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
/*
 * Clear both transmit queues while the hardware is stopped: take back
 * ownership (clear BMU_OWN) of every in-use TxD, let
 * mac_drv_clear_txd() report/free the frames, then reload the
 * hardware's descriptor-address registers and reset the queue put/get
 * pointers.  Must only be called with hw_state == STOPPED; otherwise
 * it panics and returns without touching the queues.
 */
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int tx_used ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);

		/*
		 * switch the OWN bit of all in-use TxDs back to the host
		 * so mac_drv_clear_txd() below can reclaim them
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX(5, "switch OWN bit of TxD 0x%p", t);
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/* complete/free all frames now owned by the host again */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * reload the current descriptor address register of the
		 * sync (B5_XS_DA) or async (B5_XA_DA) transmit BMU
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}

		/* reset the software ring pointers to a consistent state */
		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}
2080
2081
2082
2083
2084
2085
2086
2087
2088 #ifdef DEBUG
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115 void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
2116 {
2117 switch(flag) {
2118 case (int)NULL:
2119 DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
2120 DB_P.d_cfm = 0 ;
2121 DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
2122 #ifdef SBA
2123 DB_P.d_sba = 0 ;
2124 #endif
2125 #ifdef ESS
2126 DB_P.d_ess = 0 ;
2127 #endif
2128 break ;
2129 case DEBUG_SMTF:
2130 DB_P.d_smtf = lev ;
2131 break ;
2132 case DEBUG_SMT:
2133 DB_P.d_smt = lev ;
2134 break ;
2135 case DEBUG_ECM:
2136 DB_P.d_ecm = lev ;
2137 break ;
2138 case DEBUG_RMT:
2139 DB_P.d_rmt = lev ;
2140 break ;
2141 case DEBUG_CFM:
2142 DB_P.d_cfm = lev ;
2143 break ;
2144 case DEBUG_PCM:
2145 DB_P.d_pcm = lev ;
2146 break ;
2147 case DEBUG_SBA:
2148 #ifdef SBA
2149 DB_P.d_sba = lev ;
2150 #endif
2151 break ;
2152 case DEBUG_ESS:
2153 #ifdef ESS
2154 DB_P.d_ess = lev ;
2155 #endif
2156 break ;
2157 case DB_HWM_RX:
2158 DB_P.d_os.hwm_rx = lev ;
2159 break ;
2160 case DB_HWM_TX:
2161 DB_P.d_os.hwm_tx = lev ;
2162 break ;
2163 case DB_HWM_GEN:
2164 DB_P.d_os.hwm_gen = lev ;
2165 break ;
2166 default:
2167 break ;
2168 }
2169 }
2170 #endif