0001 /******************************************************************************
0002          iphase.c: Device driver for Interphase ATM PCI adapter cards 
0003                     Author: Peter Wang  <pwang@iphase.com>            
0004            Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
0005                    Interphase Corporation  <www.iphase.com>           
0006                                Version: 1.0                           
0007 *******************************************************************************
0008       
0009       This software may be used and distributed according to the terms
0010       of the GNU General Public License (GPL), incorporated herein by reference.
0011       Drivers based on this skeleton fall under the GPL and must retain
0012       the authorship (implicit copyright) notice.
0013 
0014       This program is distributed in the hope that it will be useful, but
0015       WITHOUT ANY WARRANTY; without even the implied warranty of
0016       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
0017       General Public License for more details.
0018       
0019       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
0020       was originally written by Monalisa Agrawal at UNH. Now this driver 
0021       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
0022       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
0023       in terms of PHY type, the size of control memory and the size of 
0024       packet memory. The following are the change log and history:
0025      
0026           Fix bugs in Mona's UBR driver.
0027           Modify the basic memory allocation and dma logic.
0028           Port the driver to the latest kernel from 2.0.46.
0029           Complete the ABR logic of the driver, and add the ABR work-
0030               around for the hardware anomalies.
0031           Add the CBR support.
0032           Add the flow control logic to the driver to allow rate-limited VCs.
0033           Add 4K VC support to the board with 512K control memory.
0034           Add the support of all the variants of the Interphase ATM PCI 
0035           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
0036           (25M UTP25) and x531 (DS3 and E3).
0037           Add SMP support.
0038 
0039       Support and updates available at: ftp://ftp.iphase.com/pub/atm
0040 
0041 *******************************************************************************/
0042 
0043 #include <linux/module.h>  
0044 #include <linux/kernel.h>  
0045 #include <linux/mm.h>  
0046 #include <linux/pci.h>  
0047 #include <linux/errno.h>  
0048 #include <linux/atm.h>  
0049 #include <linux/atmdev.h>  
0050 #include <linux/ctype.h>
0051 #include <linux/sonet.h>  
0052 #include <linux/skbuff.h>  
0053 #include <linux/time.h>  
0054 #include <linux/delay.h>  
0055 #include <linux/uio.h>  
0056 #include <linux/init.h>  
0057 #include <linux/interrupt.h>
0058 #include <linux/wait.h>
0059 #include <linux/slab.h>
0060 #include <asm/io.h>  
0061 #include <linux/atomic.h>
0062 #include <linux/uaccess.h>  
0063 #include <asm/string.h>  
0064 #include <asm/byteorder.h>  
0065 #include <linux/vmalloc.h>
0066 #include <linux/jiffies.h>
0067 #include <linux/nospec.h>
0068 #include "iphase.h"       
0069 #include "suni.h"         
0070 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
0071 
0072 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
0073 
0074 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
0075 static void desc_dbg(IADEV *iadev);
0076 
0077 static IADEV *ia_dev[8];
0078 static struct atm_dev *_ia_dev[8];
0079 static int iadev_count;
0080 static void ia_led_timer(struct timer_list *unused);
0081 static DEFINE_TIMER(ia_timer, ia_led_timer);
0082 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
0083 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
0084 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
0085             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
0086 
0087 module_param(IA_TX_BUF, int, 0);
0088 module_param(IA_TX_BUF_SZ, int, 0);
0089 module_param(IA_RX_BUF, int, 0);
0090 module_param(IA_RX_BUF_SZ, int, 0);
0091 module_param(IADebugFlag, uint, 0644);
0092 
0093 MODULE_LICENSE("GPL");
0094 
0095 /**************************** IA_LIB **********************************/
0096 
0097 static void ia_init_rtn_q (IARTN_Q *que) 
0098 { 
0099    que->next = NULL; 
0100    que->tail = NULL; 
0101 }
0102 
0103 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
0104 {
0105    data->next = NULL;
0106    if (que->next == NULL) 
0107       que->next = que->tail = data;
0108    else {
0109       data->next = que->next;
0110       que->next = data;
0111    } 
0112    return;
0113 }
0114 
0115 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
0116    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
0117    if (!entry)
0118       return -ENOMEM;
0119    entry->data = data;
0120    entry->next = NULL;
0121    if (que->next == NULL) 
0122       que->next = que->tail = entry;
0123    else {
0124       que->tail->next = entry;
0125       que->tail = que->tail->next;
0126    }      
0127    return 1;
0128 }
0129 
0130 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
0131    IARTN_Q *tmpdata;
0132    if (que->next == NULL)
0133       return NULL;
0134    tmpdata = que->next;
0135    if ( que->next == que->tail)  
0136       que->next = que->tail = NULL;
0137    else 
0138       que->next = que->next->next;
0139    return tmpdata;
0140 }
0141 
0142 static void ia_hack_tcq(IADEV *dev) {
0143 
0144   u_short       desc1;
0145   u_short       tcq_wr;
0146   struct ia_vcc         *iavcc_r = NULL; 
0147 
0148   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
0149   while (dev->host_tcq_wr != tcq_wr) {
0150      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
0151      if (!desc1) ;
0152      else if (!dev->desc_tbl[desc1 -1].timestamp) {
0153         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
0154         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
0155      }                                 
0156      else if (dev->desc_tbl[desc1 -1].timestamp) {
0157         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
0158            printk("IA: Fatal err in get_desc\n");
0159            continue;
0160         }
0161         iavcc_r->vc_desc_cnt--;
0162         dev->desc_tbl[desc1 -1].timestamp = 0;
0163         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
0164                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
0165         if (iavcc_r->pcr < dev->rate_limit) {
0166            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
0167            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
0168               printk("ia_hack_tcq: No memory available\n");
0169         } 
0170         dev->desc_tbl[desc1 -1].iavcc = NULL;
0171         dev->desc_tbl[desc1 -1].txskb = NULL;
0172      }
0173      dev->host_tcq_wr += 2;
0174      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
0175         dev->host_tcq_wr = dev->ffL.tcq_st;
0176   }
0177 } /* ia_hack_tcq */
0178 
0179 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
0180   u_short       desc_num, i;
0181   struct ia_vcc         *iavcc_r = NULL; 
0182   unsigned long delta;
0183   static unsigned long timer = 0;
0184   int ltimeout;
0185 
0186   ia_hack_tcq (dev);
0187   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
0188      timer = jiffies; 
0189      i=0;
0190      while (i < dev->num_tx_desc) {
0191         if (!dev->desc_tbl[i].timestamp) {
0192            i++;
0193            continue;
0194         }
0195         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
0196         delta = jiffies - dev->desc_tbl[i].timestamp;
0197         if (delta >= ltimeout) {
0198            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
0199            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
0200               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
0201            else 
0202               dev->ffL.tcq_rd -= 2;
0203            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
0204            if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
0205               printk("Fatal err, desc table vcc or skb is NULL\n");
0206            else 
0207               iavcc_r->vc_desc_cnt--;
0208            dev->desc_tbl[i].timestamp = 0;
0209            dev->desc_tbl[i].iavcc = NULL;
0210            dev->desc_tbl[i].txskb = NULL;
0211         }
0212         i++;
0213      } /* while */
0214   }
0215   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
0216      return 0xFFFF;
0217     
0218   /* Get the next available descriptor number from TCQ */
0219   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
0220 
0221   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
0222      dev->ffL.tcq_rd += 2;
0223      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
0224     dev->ffL.tcq_rd = dev->ffL.tcq_st;
0225      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
0226         return 0xFFFF; 
0227      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
0228   }
0229 
0230   /* get system time */
0231   dev->desc_tbl[desc_num -1].timestamp = jiffies;
0232   return desc_num;
0233 }
0234 
0235 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
0236   u_char            foundLockUp;
0237   vcstatus_t        *vcstatus;
0238   u_short               *shd_tbl;
0239   u_short               tempCellSlot, tempFract;
0240   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
0241   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
0242   u_int  i;
0243 
0244   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
0245      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
0246      vcstatus->cnt++;
0247      foundLockUp = 0;
0248      if( vcstatus->cnt == 0x05 ) {
0249         abr_vc += vcc->vci;
0250     eabr_vc += vcc->vci;
0251     if( eabr_vc->last_desc ) {
0252        if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
0253               /* Wait for 10 Micro sec */
0254               udelay(10);
0255           if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
0256          foundLockUp = 1;
0257            }
0258        else {
0259           tempCellSlot = abr_vc->last_cell_slot;
0260               tempFract    = abr_vc->fraction;
0261               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
0262                          && (tempFract == dev->testTable[vcc->vci]->fract))
0263              foundLockUp = 1;           
0264               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
0265               dev->testTable[vcc->vci]->fract = tempFract; 
0266        }        
0267         } /* last descriptor */        
0268         vcstatus->cnt = 0;      
0269      } /* vcstatus->cnt */
0270     
0271      if (foundLockUp) {
0272         IF_ABR(printk("LOCK UP found\n");) 
0273     writew(0xFFFD, dev->seg_reg+MODE_REG_0);
0274         /* Wait for 10 Micro sec */
0275         udelay(10); 
0276         abr_vc->status &= 0xFFF8;
0277         abr_vc->status |= 0x0001;  /* state is idle */
0278     shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
0279     for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
0280     if (i < dev->num_vc)
0281            shd_tbl[i] = vcc->vci;
0282         else
0283            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
0284         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
0285         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
0286         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
0287     vcstatus->cnt = 0;
0288      } /* foundLockUp */
0289 
0290   } /* if an ABR VC */
0291 
0292 
0293 }
0294  
0295 /*
0296 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
0297 **
0298 **  +----+----+------------------+-------------------------------+
0299 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
0300 **  +----+----+------------------+-------------------------------+
0301 ** 
0302 **    R = reserved (written as 0)
0303 **    NZ = 0 if 0 cells/sec; 1 otherwise
0304 **
0305 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
0306 */
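/*
** Worked example of the encoding implemented below: cr = 2560 cells/sec has
** its highest set bit at 2^11, so the exponent is 11 and the mantissa is the
** next 9 bits, (2560 >> 2) & 0x1ff = 0x080.  The result is
** NZ | (11 << 9) | 0x080 = 0x5680, which decodes as
** 1.010000000 (binary) x 2^11 = 1.25 * 2048 = 2560 cells/sec.
*/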
0307 static u16
0308 cellrate_to_float(u32 cr)
0309 {
0310 
0311 #define NZ      0x4000
0312 #define M_BITS      9       /* Number of bits in mantissa */
0313 #define E_BITS      5       /* Number of bits in exponent */
0314 #define M_MASK      0x1ff       
0315 #define E_MASK      0x1f
0316   u16   flot;
0317   u32   tmp = cr & 0x00ffffff;
0318   int   i   = 0;
0319   if (cr == 0)
0320      return 0;
0321   while (tmp != 1) {
0322      tmp >>= 1;
0323      i++;
0324   }
0325   if (i == M_BITS)
0326      flot = NZ | (i << M_BITS) | (cr & M_MASK);
0327   else if (i < M_BITS)
0328      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
0329   else
0330      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
0331   return flot;
0332 }
0333 
0334 #if 0
0335 /*
0336 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
0337 */
0338 static u32
0339 float_to_cellrate(u16 rate)
0340 {
0341   u32   exp, mantissa, cps;
0342   if ((rate & NZ) == 0)
0343      return 0;
0344   exp = (rate >> M_BITS) & E_MASK;
0345   mantissa = rate & M_MASK;
0346   if (exp == 0)
0347      return 1;
0348   cps = (1 << M_BITS) | mantissa;
0349   if (exp == M_BITS)
0350      cps = cps;
0351   else if (exp > M_BITS)
0352      cps <<= (exp - M_BITS);
0353   else
0354      cps >>= (M_BITS - exp);
0355   return cps;
0356 }
0357 #endif 
0358 
0359 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
0360   srv_p->class_type = ATM_ABR;
0361   srv_p->pcr        = dev->LineRate;
0362   srv_p->mcr        = 0;
0363   srv_p->icr        = 0x055cb7;
0364   srv_p->tbe        = 0xffffff;
0365   srv_p->frtt       = 0x3a;
0366   srv_p->rif        = 0xf;
0367   srv_p->rdf        = 0xb;
0368   srv_p->nrm        = 0x4;
0369   srv_p->trm        = 0x7;
0370   srv_p->cdf        = 0x3;
0371   srv_p->adtf       = 50;
0372 }
0373 
0374 static int
0375 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
0376                                                 struct atm_vcc *vcc, u8 flag)
0377 {
0378   f_vc_abr_entry  *f_abr_vc;
0379   r_vc_abr_entry  *r_abr_vc;
0380   u32       icr;
0381   u8        trm, nrm, crm;
0382   u16       adtf, air, *ptr16;  
0383   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
0384   f_abr_vc += vcc->vci;       
0385   switch (flag) {
0386      case 1: /* FFRED initialization */
0387 #if 0  /* sanity check */
0388        if (srv_p->pcr == 0)
0389           return INVALID_PCR;
0390        if (srv_p->pcr > dev->LineRate)
0391           srv_p->pcr = dev->LineRate;
0392        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
0393       return MCR_UNAVAILABLE;
0394        if (srv_p->mcr > srv_p->pcr)
0395       return INVALID_MCR;
0396        if (!(srv_p->icr))
0397       srv_p->icr = srv_p->pcr;
0398        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
0399       return INVALID_ICR;
0400        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
0401       return INVALID_TBE;
0402        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
0403       return INVALID_FRTT;
0404        if (srv_p->nrm > MAX_NRM)
0405       return INVALID_NRM;
0406        if (srv_p->trm > MAX_TRM)
0407       return INVALID_TRM;
0408        if (srv_p->adtf > MAX_ADTF)
0409           return INVALID_ADTF;
0410        else if (srv_p->adtf == 0)
0411       srv_p->adtf = 1;
0412        if (srv_p->cdf > MAX_CDF)
0413       return INVALID_CDF;
0414        if (srv_p->rif > MAX_RIF)
0415       return INVALID_RIF;
0416        if (srv_p->rdf > MAX_RDF)
0417       return INVALID_RDF;
0418 #endif
0419        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
0420        f_abr_vc->f_vc_type = ABR;
0421        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
0422                       /* i.e 2**n = 2 << (n-1) */
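                      /* e.g. the default srv_p->nrm = 4 set by init_abr_vc()
                         gives nrm = 2 << 4 = 32 (i.e. 2**5) */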
0423        f_abr_vc->f_nrm = nrm << 8 | nrm;
0424        trm = 100000/(2 << (16 - srv_p->trm));
0425        if ( trm == 0) trm = 1;
0426        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
0427        crm = srv_p->tbe / nrm;
0428        if (crm == 0) crm = 1;
0429        f_abr_vc->f_crm = crm & 0xff;
0430        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
0431        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
0432                 ((srv_p->tbe/srv_p->frtt)*1000000) :
0433                 (1000000/(srv_p->frtt/srv_p->tbe)));
0434        f_abr_vc->f_icr = cellrate_to_float(icr);
0435        adtf = (10000 * srv_p->adtf)/8192;
0436        if (adtf == 0) adtf = 1; 
0437        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
0438        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
0439        f_abr_vc->f_acr = f_abr_vc->f_icr;
0440        f_abr_vc->f_status = 0x0042;
0441        break;
0442     case 0: /* RFRED initialization */  
0443        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
0444        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
0445        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
0446        r_abr_vc += vcc->vci;
0447        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
0448        air = srv_p->pcr << (15 - srv_p->rif);
0449        if (air == 0) air = 1;
0450        r_abr_vc->r_air = cellrate_to_float(air);
0451        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
0452        dev->sum_mcr    += srv_p->mcr;
0453        dev->n_abr++;
0454        break;
0455     default:
0456        break;
0457   }
0458   return    0;
0459 }
0460 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
0461    u32 rateLow=0, rateHigh, rate;
0462    int entries;
0463    struct ia_vcc *ia_vcc;
0464 
0465    int   idealSlot =0, testSlot, toBeAssigned, inc;
0466    u32   spacing;
0467    u16  *SchedTbl, *TstSchedTbl;
0468    u16  cbrVC, vcIndex;
0469    u32   fracSlot    = 0;
0470    u32   sp_mod      = 0;
0471    u32   sp_mod2     = 0;
0472 
0473    /* IpAdjustTrafficParams */
0474    if (vcc->qos.txtp.max_pcr <= 0) {
0475       IF_ERR(printk("PCR for CBR not defined\n");)
0476       return -1;
0477    }
0478    rate = vcc->qos.txtp.max_pcr;
0479    entries = rate / dev->Granularity;
0480    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
0481                                 entries, rate, dev->Granularity);)
0482    if (entries < 1)
0483       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
0484    rateLow  =  entries * dev->Granularity;
0485    rateHigh = (entries + 1) * dev->Granularity;
0486    if (3*(rate - rateLow) > (rateHigh - rate))
0487       entries++;
0488    if (entries > dev->CbrRemEntries) {
0489       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
0490       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
0491                                        entries, dev->CbrRemEntries);)
0492       return -EBUSY;
0493    }   
0494 
0495    ia_vcc = INPH_IA_VCC(vcc);
0496    ia_vcc->NumCbrEntry = entries; 
0497    dev->sum_mcr += entries * dev->Granularity; 
0498    /* IaFFrednInsertCbrSched */
0499    // Starting at an arbitrary location, place the entries into the table
0500    // as smoothly as possible
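   // For example, with a hypothetical CbrTotEntries of 4096 and entries = 3,
   // spacing = 1365 and sp_mod = 1; slots land roughly 1365 entries apart and
   // the remainder is spread over the table via fracSlot/sp_mod2 below.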
0501    cbrVC   = 0;
0502    spacing = dev->CbrTotEntries / entries;
0503    sp_mod  = dev->CbrTotEntries % entries; // get modulo
0504    toBeAssigned = entries;
0505    fracSlot = 0;
0506    vcIndex  = vcc->vci;
0507    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
0508    while (toBeAssigned)
0509    {
0510       // If this is the first time, start the table loading for this connection
0511       // as close to entryPoint as possible.
0512       if (toBeAssigned == entries)
0513       {
0514          idealSlot = dev->CbrEntryPt;
0515          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
0516          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
0517             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
0518       } else {
0519          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
0520          // in the table that would be  smoothest
0521          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
0522          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
0523       }
0524       if (idealSlot >= (int)dev->CbrTotEntries) 
0525          idealSlot -= dev->CbrTotEntries;  
0526       // Continuously check around this ideal value until a null
0527       // location is encountered.
0528       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
0529       inc = 0;
0530       testSlot = idealSlot;
0531       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
0532       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
0533                                 testSlot, TstSchedTbl,toBeAssigned);)
0534       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
0535       while (cbrVC)  // If another VC at this location, we have to keep looking
0536       {
0537           inc++;
0538           testSlot = idealSlot - inc;
0539           if (testSlot < 0) { // Wrap if necessary
0540              testSlot += dev->CbrTotEntries;
0541              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
0542                                                        SchedTbl,testSlot);)
0543           }
0544           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
0545           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
0546           if (!cbrVC)
0547              break;
0548           testSlot = idealSlot + inc;
0549           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
0550              testSlot -= dev->CbrTotEntries;
0551              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
0552              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
0553                                             testSlot, toBeAssigned);)
0554           } 
0555           // set table index and read in value
0556           TstSchedTbl = (u16*)(SchedTbl + testSlot);
0557           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
0558                           TstSchedTbl,cbrVC,inc);)
0559           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
0560        } /* while */
0561        // Move this VCI number into this location of the CBR Sched table.
0562        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
0563        dev->CbrRemEntries--;
0564        toBeAssigned--;
0565    } /* while */ 
0566 
0567    /* IaFFrednCbrEnable */
0568    dev->NumEnabledCBR++;
0569    if (dev->NumEnabledCBR == 1) {
0570        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
0571        IF_CBR(printk("CBR is enabled\n");)
0572    }
0573    return 0;
0574 }
0575 static void ia_cbrVc_close (struct atm_vcc *vcc) {
0576    IADEV *iadev;
0577    u16 *SchedTbl, NullVci = 0;
0578    u32 i, NumFound;
0579 
0580    iadev = INPH_IA_DEV(vcc->dev);
0581    iadev->NumEnabledCBR--;
0582    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
0583    if (iadev->NumEnabledCBR == 0) {
0584       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
0585       IF_CBR (printk("CBR support disabled\n");)
0586    }
0587    NumFound = 0;
0588    for (i=0; i < iadev->CbrTotEntries; i++)
0589    {
0590       if (*SchedTbl == vcc->vci) {
0591          iadev->CbrRemEntries++;
0592          *SchedTbl = NullVci;
0593          IF_CBR(NumFound++;)
0594       }
0595       SchedTbl++;   
0596    } 
0597    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
0598 }
0599 
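/*
** Count the TX descriptors currently available in the transmit complete
** queue (TCQ): the distance from our read pointer to the adapter's write
** pointer, two bytes per entry, wrapping within [tcq_st, tcq_ed].
*/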
0600 static int ia_avail_descs(IADEV *iadev) {
0601    int tmp = 0;
0602    ia_hack_tcq(iadev);
0603    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
0604       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
0605    else
0606       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
0607                    iadev->ffL.tcq_st) / 2;
0608    return tmp;
0609 }    
0610 
0611 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
0612 
0613 static int ia_que_tx (IADEV *iadev) { 
0614    struct sk_buff *skb;
0615    int num_desc;
0616    struct atm_vcc *vcc;
0617    num_desc = ia_avail_descs(iadev);
0618 
0619    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
0620       if (!(vcc = ATM_SKB(skb)->vcc)) {
0621          dev_kfree_skb_any(skb);
0622          printk("ia_que_tx: Null vcc\n");
0623          break;
0624       }
0625       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
0626          dev_kfree_skb_any(skb);
0627          printk("Free the SKB on closed vci %d \n", vcc->vci);
0628          break;
0629       }
0630       if (ia_pkt_tx (vcc, skb)) {
0631          skb_queue_head(&iadev->tx_backlog, skb);
0632       }
0633       num_desc--;
0634    }
0635    return 0;
0636 }
0637 
0638 static void ia_tx_poll (IADEV *iadev) {
0639    struct atm_vcc *vcc = NULL;
0640    struct sk_buff *skb = NULL, *skb1 = NULL;
0641    struct ia_vcc *iavcc;
0642    IARTN_Q *  rtne;
0643 
0644    ia_hack_tcq(iadev);
0645    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
0646        skb = rtne->data.txskb;
0647        if (!skb) {
0648            printk("ia_tx_poll: skb is null\n");
0649            goto out;
0650        }
0651        vcc = ATM_SKB(skb)->vcc;
0652        if (!vcc) {
0653            printk("ia_tx_poll: vcc is null\n");
0654            dev_kfree_skb_any(skb);
0655        goto out;
0656        }
0657 
0658        iavcc = INPH_IA_VCC(vcc);
0659        if (!iavcc) {
0660            printk("ia_tx_poll: iavcc is null\n");
0661            dev_kfree_skb_any(skb);
0662        goto out;
0663        }
0664 
0665        skb1 = skb_dequeue(&iavcc->txing_skb);
0666        while (skb1 && (skb1 != skb)) {
0667           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
0668              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
0669           }
0670           IF_ERR(printk("Release the SKB not match\n");)
0671           if ((vcc->pop) && (skb1->len != 0))
0672           {
0673              vcc->pop(vcc, skb1);
0674              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
0675                                                           (long)skb1);)
0676           }
0677           else 
0678              dev_kfree_skb_any(skb1);
0679           skb1 = skb_dequeue(&iavcc->txing_skb);
0680        }                                                        
0681        if (!skb1) {
0682           IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
0683           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
0684           break;
0685        }
0686        if ((vcc->pop) && (skb->len != 0))
0687        {
0688           vcc->pop(vcc, skb);
0689           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
0690        }
0691        else 
0692           dev_kfree_skb_any(skb);
0693        kfree(rtne);
0694     }
0695     ia_que_tx(iadev);
0696 out:
0697     return;
0698 }
0699 #if 0
0700 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
0701 {
0702         u32 t;
0703     int i;
0704     /*
0705      * Issue a command to enable writes to the NOVRAM
0706      */
0707     NVRAM_CMD (EXTEND + EWEN);
0708     NVRAM_CLR_CE;
0709     /*
0710      * issue the write command
0711      */
0712     NVRAM_CMD(IAWRITE + addr);
0713     /* 
0714      * Send the data, starting with D15, then D14, and so on for 16 bits
0715      */
0716     for (i=15; i>=0; i--) {
0717         NVRAM_CLKOUT (val & 0x8000);
0718         val <<= 1;
0719     }
0720     NVRAM_CLR_CE;
0721     CFG_OR(NVCE);
0722     t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
0723     while (!(t & NVDO))
0724         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
0725 
0726     NVRAM_CLR_CE;
0727     /*
0728      * disable writes again
0729      */
0730     NVRAM_CMD(EXTEND + EWDS)
0731     NVRAM_CLR_CE;
0732     CFG_AND(~NVDI);
0733 }
0734 #endif
0735 
0736 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
0737 {
0738     u_short val;
0739         u32 t;
0740     int i;
0741     /*
0742      * Read the first bit that was clocked with the falling edge of
0743      * the last command data clock
0744      */
0745     NVRAM_CMD(IAREAD + addr);
0746     /*
0747      * Now read the rest of the bits, the next bit read is D14, then D13,
0748      * and so on.
0749      */
0750     val = 0;
0751     for (i=15; i>=0; i--) {
0752         NVRAM_CLKIN(t);
0753         val |= (t << i);
0754     }
0755     NVRAM_CLR_CE;
0756     CFG_AND(~NVDI);
0757     return val;
0758 }
0759 
0760 static void ia_hw_type(IADEV *iadev) {
0761    u_short memType = ia_eeprom_get(iadev, 25);   
0762    iadev->memType = memType;
0763    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
0764       iadev->num_tx_desc = IA_TX_BUF;
0765       iadev->tx_buf_sz = IA_TX_BUF_SZ;
0766       iadev->num_rx_desc = IA_RX_BUF;
0767       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
0768    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
0769       if (IA_TX_BUF == DFL_TX_BUFFERS)
0770         iadev->num_tx_desc = IA_TX_BUF / 2;
0771       else 
0772         iadev->num_tx_desc = IA_TX_BUF;
0773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
0774       if (IA_RX_BUF == DFL_RX_BUFFERS)
0775         iadev->num_rx_desc = IA_RX_BUF / 2;
0776       else
0777         iadev->num_rx_desc = IA_RX_BUF;
0778       iadev->rx_buf_sz = IA_RX_BUF_SZ;
0779    }
0780    else {
0781       if (IA_TX_BUF == DFL_TX_BUFFERS) 
0782         iadev->num_tx_desc = IA_TX_BUF / 8;
0783       else
0784         iadev->num_tx_desc = IA_TX_BUF;
0785       iadev->tx_buf_sz = IA_TX_BUF_SZ;
0786       if (IA_RX_BUF == DFL_RX_BUFFERS)
0787         iadev->num_rx_desc = IA_RX_BUF / 8;
0788       else
0789         iadev->num_rx_desc = IA_RX_BUF;
0790       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
0791    } 
0792    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
0793    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
0794          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
0795          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
0796 
0797 #if 0
0798    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
0799       iadev->phy_type = PHY_OC3C_S;
0800    else if ((memType & FE_MASK) == FE_UTP_OPTION)
0801       iadev->phy_type = PHY_UTP155;
0802    else
0803      iadev->phy_type = PHY_OC3C_M;
0804 #endif
0805    
0806    iadev->phy_type = memType & FE_MASK;
0807    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
0808                                          memType,iadev->phy_type);)
0809    if (iadev->phy_type == FE_25MBIT_PHY) 
0810       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
0811    else if (iadev->phy_type == FE_DS3_PHY)
0812       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
0813    else if (iadev->phy_type == FE_E3_PHY) 
0814       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
0815    else
0816        iadev->LineRate = (u32)(ATM_OC3_PCR);
0817    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
0818 
0819 }
0820 
0821 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
0822 {
0823     return readl(ia->phy + (reg >> 2));
0824 }
0825 
0826 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
0827 {
0828     writel(val, ia->phy + (reg >> 2));
0829 }
0830 
0831 static void ia_frontend_intr(struct iadev_priv *iadev)
0832 {
0833     u32 status;
0834 
0835     if (iadev->phy_type & FE_25MBIT_PHY) {
0836         status = ia_phy_read32(iadev, MB25_INTR_STATUS);
0837         iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
0838     } else if (iadev->phy_type & FE_DS3_PHY) {
0839         ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
0840         status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
0841         iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
0842     } else if (iadev->phy_type & FE_E3_PHY) {
0843         ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
0844         status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
0845         iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
0846     } else {
0847         status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
0848         iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
0849     }
0850 
0851     printk(KERN_INFO "IA: SUNI carrier %s\n",
0852         iadev->carrier_detect ? "detected" : "lost signal");
0853 }
0854 
0855 static void ia_mb25_init(struct iadev_priv *iadev)
0856 {
0857 #if 0
0858    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
0859 #endif
0860     ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
0861     ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
0862 
0863     iadev->carrier_detect =
0864         (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
0865 }
0866 
0867 struct ia_reg {
0868     u16 reg;
0869     u16 val;
0870 };
0871 
0872 static void ia_phy_write(struct iadev_priv *iadev,
0873              const struct ia_reg *regs, int len)
0874 {
0875     while (len--) {
0876         ia_phy_write32(iadev, regs->reg, regs->val);
0877         regs++;
0878     }
0879 }
0880 
0881 static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
0882 {
0883     static const struct ia_reg suni_ds3_init[] = {
0884         { SUNI_DS3_FRM_INTR_ENBL,   0x17 },
0885         { SUNI_DS3_FRM_CFG,     0x01 },
0886         { SUNI_DS3_TRAN_CFG,        0x01 },
0887         { SUNI_CONFIG,          0 },
0888         { SUNI_SPLR_CFG,        0 },
0889         { SUNI_SPLT_CFG,        0 }
0890     };
0891     u32 status;
0892 
0893     status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
0894     iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
0895 
0896     ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
0897 }
0898 
0899 static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
0900 {
0901     static const struct ia_reg suni_e3_init[] = {
0902         { SUNI_E3_FRM_FRAM_OPTIONS,     0x04 },
0903         { SUNI_E3_FRM_MAINT_OPTIONS,        0x20 },
0904         { SUNI_E3_FRM_FRAM_INTR_ENBL,       0x1d },
0905         { SUNI_E3_FRM_MAINT_INTR_ENBL,      0x30 },
0906         { SUNI_E3_TRAN_STAT_DIAG_OPTIONS,   0 },
0907         { SUNI_E3_TRAN_FRAM_OPTIONS,        0x01 },
0908         { SUNI_CONFIG,              SUNI_PM7345_E3ENBL },
0909         { SUNI_SPLR_CFG,            0x41 },
0910         { SUNI_SPLT_CFG,            0x41 }
0911     };
0912     u32 status;
0913 
0914     status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
0915     iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
0916     ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
0917 }
0918 
0919 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
0920 {
0921     static const struct ia_reg suni_init[] = {
0922         /* Enable RSOP loss of signal interrupt. */
0923         { SUNI_INTR_ENBL,       0x28 },
0924         /* Clear error counters. */
0925         { SUNI_ID_RESET,        0 },
0926         /* Clear "PMCTST" in master test register. */
0927         { SUNI_MASTER_TEST,     0 },
0928 
0929         { SUNI_RXCP_CTRL,       0x2c },
0930         { SUNI_RXCP_FCTRL,      0x81 },
0931 
0932         { SUNI_RXCP_IDLE_PAT_H1,    0 },
0933         { SUNI_RXCP_IDLE_PAT_H2,    0 },
0934         { SUNI_RXCP_IDLE_PAT_H3,    0 },
0935         { SUNI_RXCP_IDLE_PAT_H4,    0x01 },
0936 
0937         { SUNI_RXCP_IDLE_MASK_H1,   0xff },
0938         { SUNI_RXCP_IDLE_MASK_H2,   0xff },
0939         { SUNI_RXCP_IDLE_MASK_H3,   0xff },
0940         { SUNI_RXCP_IDLE_MASK_H4,   0xfe },
0941 
0942         { SUNI_RXCP_CELL_PAT_H1,    0 },
0943         { SUNI_RXCP_CELL_PAT_H2,    0 },
0944         { SUNI_RXCP_CELL_PAT_H3,    0 },
0945         { SUNI_RXCP_CELL_PAT_H4,    0x01 },
0946 
0947         { SUNI_RXCP_CELL_MASK_H1,   0xff },
0948         { SUNI_RXCP_CELL_MASK_H2,   0xff },
0949         { SUNI_RXCP_CELL_MASK_H3,   0xff },
0950         { SUNI_RXCP_CELL_MASK_H4,   0xff },
0951 
0952         { SUNI_TXCP_CTRL,       0xa4 },
0953         { SUNI_TXCP_INTR_EN_STS,    0x10 },
0954         { SUNI_TXCP_IDLE_PAT_H5,    0x55 }
0955     };
0956 
0957     if (iadev->phy_type & FE_DS3_PHY)
0958         ia_suni_pm7345_init_ds3(iadev);
0959     else
0960         ia_suni_pm7345_init_e3(iadev);
0961 
0962     ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
0963 
0964     ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
0965         ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
0966           SUNI_PM7345_DLB | SUNI_PM7345_PLB));
0967 #ifdef __SNMP__
0968    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
0969 #endif /* __SNMP__ */
0970    return;
0971 }
0972 
0973 
0974 /***************************** IA_LIB END *****************************/
0975     
0976 #ifdef CONFIG_ATM_IA_DEBUG
0977 static int tcnter = 0;
0978 static void xdump( u_char*  cp, int  length, char*  prefix )
0979 {
0980     int col, count;
0981     u_char prntBuf[120];
0982     u_char*  pBuf = prntBuf;
0983     count = 0;
0984     while(count < length){
0985         pBuf += sprintf( pBuf, "%s", prefix );
0986         for(col = 0;count + col < length && col < 16; col++){
0987             if (col != 0 && (col % 4) == 0)
0988                 pBuf += sprintf( pBuf, " " );
0989             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
0990         }
0991         while(col++ < 16){      /* pad end of buffer with blanks */
0992             if ((col % 4) == 0)
0993                 sprintf( pBuf, " " );
0994             pBuf += sprintf( pBuf, "   " );
0995         }
0996         pBuf += sprintf( pBuf, "  " );
0997         for(col = 0;count + col < length && col < 16; col++){
0998         u_char c = cp[count + col];
0999 
1000         if (isascii(c) && isprint(c))
1001             pBuf += sprintf(pBuf, "%c", c);
1002         else
1003             pBuf += sprintf(pBuf, ".");
1004                 }
1005         printk("%s\n", prntBuf);
1006         count += col;
1007         pBuf = prntBuf;
1008     }
1009 
1010 }  /* close xdump(... */
1011 #endif /* CONFIG_ATM_IA_DEBUG */
1012 
1013   
1014 static struct atm_dev *ia_boards = NULL;  
1015   
1016 #define ACTUAL_RAM_BASE \
1017     RAM_BASE*((iadev->mem)/(128 * 1024))  
1018 #define ACTUAL_SEG_RAM_BASE \
1019     IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1020 #define ACTUAL_REASS_RAM_BASE \
1021     IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1022   
1023   
1024 /*-- some utilities and memory allocation stuff will come here -------------*/  
1025   
1026 static void desc_dbg(IADEV *iadev) {
1027 
1028   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1029   u32 i;
1030   void __iomem *tmp;
1031   // regval = readl((u32)ia_cmds->maddr);
1032   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1033   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1034                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1035                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1036   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1037                    iadev->ffL.tcq_rd);
1038   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1039   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1040   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1041   i = 0;
1042   while (tcq_st_ptr != tcq_ed_ptr) {
1043       tmp = iadev->seg_ram+tcq_st_ptr;
1044       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1045       tcq_st_ptr += 2;
1046   }
1047   for(i=0; i <iadev->num_tx_desc; i++)
1048       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1049 } 
1050   
1051   
1052 /*----------------------------- Receiving side stuff --------------------------*/  
1053  
1054 static void rx_excp_rcvd(struct atm_dev *dev)  
1055 {  
1056 #if 0 /* closing the receiving side will cause too many excp int */  
1057   IADEV *iadev;  
1058   u_short state;  
1059   u_short excpq_rd_ptr;  
1060   //u_short *ptr;  
1061   int vci, error = 1;  
1062   iadev = INPH_IA_DEV(dev);  
1063   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1064   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1065   { printk("state = %x \n", state); 
1066         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1067  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1068         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1069             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1070         // TODO: update exception stat
1071     vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1072     error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1073         // pwang_test
1074     excpq_rd_ptr += 4;  
1075     if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1076         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1077     writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1078         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1079   }  
1080 #endif
1081 }  
1082   
1083 static void free_desc(struct atm_dev *dev, int desc)  
1084 {  
1085     IADEV *iadev;  
1086     iadev = INPH_IA_DEV(dev);  
1087         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1088     iadev->rfL.fdq_wr +=2;
1089     if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1090         iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1091     writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1092 }  
1093   
1094   
1095 static int rx_pkt(struct atm_dev *dev)  
1096 {  
1097     IADEV *iadev;  
1098     struct atm_vcc *vcc;  
1099     unsigned short status;  
1100     struct rx_buf_desc __iomem *buf_desc_ptr;  
1101     int desc;   
1102     struct dle* wr_ptr;  
1103     int len;  
1104     struct sk_buff *skb;  
1105     u_int buf_addr, dma_addr;  
1106 
1107     iadev = INPH_IA_DEV(dev);  
1108     if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1109     {  
1110         printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1111         return -EINVAL;  
1112     }  
1113     /* mask off the top 3 bits to get the actual desc number */  
1114     desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1115         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1116                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1117               printk(" pcq_wr_ptr = 0x%x\n",
1118                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1119     /* update the read pointer - maybe we should do this at the end */  
1120     if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1121         iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1122     else  
1123         iadev->rfL.pcq_rd += 2;
1124     writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1125   
1126     /* get the buffer desc entry.  
1127         update stuff. - doesn't seem to be any update necessary  
1128     */  
1129     buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1130     /* make the ptr point to the corresponding buffer desc entry */  
1131     buf_desc_ptr += desc;     
1132         if (!desc || (desc > iadev->num_rx_desc) || 
1133                       ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
1134             free_desc(dev, desc);
1135             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1136             return -1;
1137         }
1138     vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1139     if (!vcc)  
1140     {      
1141                 free_desc(dev, desc); 
1142         printk("IA: null vcc, drop PDU\n");  
1143         return -1;  
1144     }  
1145       
1146   
1147     /* might want to check the status bits for errors */  
1148     status = (u_short) (buf_desc_ptr->desc_mode);  
1149     if (status & (RX_CER | RX_PTE | RX_OFL))  
1150     {  
1151                 atomic_inc(&vcc->stats->rx_err);
1152         IF_ERR(printk("IA: bad packet, dropping it");)  
1153                 if (status & RX_CER) { 
1154                     IF_ERR(printk(" cause: packet CRC error\n");)
1155                 }
1156                 else if (status & RX_PTE) {
1157                     IF_ERR(printk(" cause: packet time out\n");)
1158                 }
1159                 else {
1160                     IF_ERR(printk(" cause: buffer overflow\n");)
1161                 }
1162         goto out_free_desc;
1163     }  
1164   
1165     /*  
1166         build DLE.    
1167     */  
1168   
1169     buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1170     dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1171     len = dma_addr - buf_addr;  
1172         if (len > iadev->rx_buf_sz) {
1173            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1174            atomic_inc(&vcc->stats->rx_err);
1175        goto out_free_desc;
1176         }
1177           
1178         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1179            if (vcc->vci < 32)
1180               printk("Drop control packets\n");
1181        goto out_free_desc;
1182         }
1183     skb_put(skb,len);  
1184         // pwang_test
1185         ATM_SKB(skb)->vcc = vcc;
1186         ATM_DESC(skb) = desc;        
1187     skb_queue_tail(&iadev->rx_dma_q, skb);  
1188 
1189     /* Build the DLE structure */  
1190     wr_ptr = iadev->rx_dle_q.write;  
1191     wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
1192                           len, DMA_FROM_DEVICE);
1193     wr_ptr->local_pkt_addr = buf_addr;  
1194     wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1195     wr_ptr->mode = DMA_INT_ENABLE;  
1196   
1197     /* should take care of wrap around here too. */  
1198         if(++wr_ptr == iadev->rx_dle_q.end)
1199              wr_ptr = iadev->rx_dle_q.start;
1200     iadev->rx_dle_q.write = wr_ptr;  
1201     udelay(1);  
1202     /* Increment transaction counter */  
1203     writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1204 out:    return 0;  
1205 out_free_desc:
1206         free_desc(dev, desc);
1207         goto out;
1208 }  
1209   
1210 static void rx_intr(struct atm_dev *dev)  
1211 {  
1212   IADEV *iadev;  
1213   u_short status;  
1214   u_short state, i;  
1215   
1216   iadev = INPH_IA_DEV(dev);  
1217   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1218   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1219   if (status & RX_PKT_RCVD)  
1220   {  
1221     /* do something */  
1222     /* Basically received an interrupt for receiving a packet.  
1223     A descriptor would have been written to the packet complete   
1224     queue. Get all the descriptors and set up DMA to move the   
1225     packets until the packet complete queue is empty.  
1226     */  
1227     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1228         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1229     while(!(state & PCQ_EMPTY))  
1230     {  
1231              rx_pkt(dev);  
1232          state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1233     }  
1234         iadev->rxing = 1;
1235   }  
1236   if (status & RX_FREEQ_EMPT)  
1237   {   
1238      if (iadev->rxing) {
1239         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1240         iadev->rx_tmp_jif = jiffies; 
1241         iadev->rxing = 0;
1242      } 
1243      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1244                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1245         for (i = 1; i <= iadev->num_rx_desc; i++)
1246                free_desc(dev, i);
1247 printk("Test logic RUN!!!!\n");
1248         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1249         iadev->rxing = 1;
1250      }
1251      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1252   }  
1253 
1254   if (status & RX_EXCP_RCVD)  
1255   {  
1256     /* probably need to handle the exception queue also. */  
1257     IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1258     rx_excp_rcvd(dev);  
1259   }  
1260 
1261 
1262   if (status & RX_RAW_RCVD)  
1263   {  
1264     /* need to handle the raw incoming cells. This depends on   
1265     whether we have programmed the adapter to receive the raw cells or not.  
1266     Else ignore. */  
1267     IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1268   }  
1269 }  
1270   
1271   
1272 static void rx_dle_intr(struct atm_dev *dev)  
1273 {  
1274   IADEV *iadev;  
1275   struct atm_vcc *vcc;   
1276   struct sk_buff *skb;  
1277   int desc;  
1278   u_short state;   
1279   struct dle *dle, *cur_dle;  
1280   u_int dle_lp;  
1281   int len;
1282   iadev = INPH_IA_DEV(dev);  
1283  
1284   /* free all the dles done, that is just update our own dle read pointer   
1285     - do we really need to do this. Think not. */  
1286   /* DMA is done, just get all the receive buffers from the rx dma queue  
1287     and push them up to the higher layer protocol. Also free the desc  
1288     associated with the buffer. */  
1289   dle = iadev->rx_dle_q.read;  
1290   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1291   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1292   while(dle != cur_dle)  
1293   {  
1294       /* free the DMAed skb */  
1295       skb = skb_dequeue(&iadev->rx_dma_q);  
1296       if (!skb)  
1297          goto INCR_DLE;
1298       desc = ATM_DESC(skb);
1299       free_desc(dev, desc);  
1300                
1301       if (!(len = skb->len))
1302       {  
1303           printk("rx_dle_intr: skb len 0\n");  
1304       dev_kfree_skb_any(skb);  
1305       }  
1306       else  
1307       {  
1308           struct cpcs_trailer *trailer;
1309           u_short length;
1310           struct ia_vcc *ia_vcc;
1311 
1312       dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
1313                len, DMA_FROM_DEVICE);
1314           /* no VCC related housekeeping done as yet. lets see */  
1315           vcc = ATM_SKB(skb)->vcc;
1316       if (!vcc) {
1317           printk("IA: null vcc\n");  
1318               dev_kfree_skb_any(skb);
1319               goto INCR_DLE;
1320           }
1321           ia_vcc = INPH_IA_VCC(vcc);
1322           if (ia_vcc == NULL)
1323           {
1324              atomic_inc(&vcc->stats->rx_err);
1325              atm_return(vcc, skb->truesize);
1326              dev_kfree_skb_any(skb);
1327              goto INCR_DLE;
1328            }
1329           // get real pkt length  pwang_test
1330           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1331                                  skb->len - sizeof(*trailer));
1332       length = swap_byte_order(trailer->length);
1333           if ((length > iadev->rx_buf_sz) || (length > 
1334                               (skb->len - sizeof(struct cpcs_trailer))))
1335           {
1336              atomic_inc(&vcc->stats->rx_err);
1337              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1338                                                             length, skb->len);)
1339              atm_return(vcc, skb->truesize);
1340              dev_kfree_skb_any(skb);
1341              goto INCR_DLE;
1342           }
1343           skb_trim(skb, length);
1344           
1345       /* Display the packet */  
1346       IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1347           xdump(skb->data, skb->len, "RX: ");
1348           printk("\n");)
1349 
1350       IF_RX(printk("rx_dle_intr: skb push");)  
1351       vcc->push(vcc,skb);  
1352       atomic_inc(&vcc->stats->rx);
1353           iadev->rx_pkt_cnt++;
1354       }  
1355 INCR_DLE:
1356       if (++dle == iadev->rx_dle_q.end)  
1357           dle = iadev->rx_dle_q.start;  
1358   }  
1359   iadev->rx_dle_q.read = dle;  
1360   
1361   /* if the interrupts are masked because there were no free desc available,  
1362         unmask them now. */ 
1363   if (!iadev->rxing) {
1364      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1365      if (!(state & FREEQ_EMPTY)) {
1366         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1367         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1368                                       iadev->reass_reg+REASS_MASK_REG);
1369         iadev->rxing++; 
1370      }
1371   }
1372 }  
1373   
1374   
1375 static int open_rx(struct atm_vcc *vcc)  
1376 {  
1377     IADEV *iadev;  
1378     u_short __iomem *vc_table;  
1379     u_short __iomem *reass_ptr;  
1380     IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1381 
1382     if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1383     iadev = INPH_IA_DEV(vcc->dev);  
1384         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1385            if (iadev->phy_type & FE_25MBIT_PHY) {
1386                printk("IA:  ABR not supported\n");
1387                return -EINVAL; 
1388            }
1389         }
1390     /* Make only this VCI in the vc table valid and let all   
1391         others be invalid entries */  
1392     vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1393     vc_table += vcc->vci;
1394     /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1395 
1396         *vc_table = vcc->vci << 6;
1397     /* Also keep a list of open rx vcs so that we can attach them with  
1398         incoming PDUs later. */  
1399     if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1400                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1401     {  
1402                 srv_cls_param_t srv_p;
1403                 init_abr_vc(iadev, &srv_p);
1404                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1405     } 
1406         else {  /* for UBR  later may need to add CBR logic */
1407             reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1408             reass_ptr += vcc->vci;
1409             *reass_ptr = NO_AAL5_PKT;
1410         }
1411     
1412     if (iadev->rx_open[vcc->vci])  
1413         printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1414             vcc->dev->number, vcc->vci);  
1415     iadev->rx_open[vcc->vci] = vcc;  
1416     return 0;  
1417 }  
1418   
1419 static int rx_init(struct atm_dev *dev)  
1420 {  
1421     IADEV *iadev;  
1422     struct rx_buf_desc __iomem *buf_desc_ptr;  
1423     unsigned long rx_pkt_start = 0;  
1424     void *dle_addr;  
1425     struct abr_vc_table  *abr_vc_table; 
1426     u16 *vc_table;  
1427     u16 *reass_table;  
1428     int i,j, vcsize_sel;  
1429     u_short freeq_st_adr;  
1430     u_short *freeq_start;  
1431   
1432     iadev = INPH_IA_DEV(dev);  
1433   //    spin_lock_init(&iadev->rx_lock); 
1434   
1435     /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1436     dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1437                       &iadev->rx_dle_dma, GFP_KERNEL);
1438     if (!dle_addr)  {  
1439         printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1440         goto err_out;
1441     }
1442     iadev->rx_dle_q.start = (struct dle *)dle_addr;
1443     iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1444     iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1445     iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1446     /* the end of the dle q points to the entry after the last  
1447     DLE that can be used. */  
1448   
1449     /* write the upper 20 bits of the start address to rx list address register */  
1450     /* We know this is 32bit bus addressed so the following is safe */
1451     writel(iadev->rx_dle_dma & 0xfffff000,
1452            iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1453     IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1454                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1455                       readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1456     printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1457                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1458                       readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1459   
1460     writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1461     writew(0, iadev->reass_reg+MODE_REG);  
1462     writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1463   
1464     /* Receive side control memory map  
1465        -------------------------------  
1466   
1467         Buffer descr    0x0000 (736 - 23K)  
1468         VP Table    0x5c00 (256 - 512)  
1469         Except q    0x5e00 (128 - 512)  
1470         Free buffer q   0x6000 (1K - 2K)  
1471         Packet comp q   0x6800 (1K - 2K)  
1472         Reass Table 0x7000 (1K - 2K)  
1473         VC Table    0x7800 (1K - 2K)  
1474         ABR VC Table    0x8000 (1K - 32K)  
1475     */  
1476       
1477     /* Base address for Buffer Descriptor Table */  
1478     writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1479     /* Set the buffer size register */  
1480     writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1481   
1482     /* Initialize each entry in the Buffer Descriptor Table */  
1483         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1484     buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1485     memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1486     buf_desc_ptr++;  
1487     rx_pkt_start = iadev->rx_pkt_ram;  
1488     for(i=1; i<=iadev->num_rx_desc; i++)  
1489     {  
1490         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1491         buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1492         buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1493         buf_desc_ptr++;       
1494         rx_pkt_start += iadev->rx_buf_sz;  
1495     }  
1496     IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1497         i = FREE_BUF_DESC_Q*iadev->memSize; 
1498     writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1499         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1500         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1501                                          iadev->reass_reg+FREEQ_ED_ADR);
1502         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1503         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1504                                         iadev->reass_reg+FREEQ_WR_PTR);    
1505     /* Fill the FREEQ with all the free descriptors. */  
1506     freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1507     freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1508     for(i=1; i<=iadev->num_rx_desc; i++)  
1509     {  
1510         *freeq_start = (u_short)i;  
1511         freeq_start++;  
1512     }  
1513     IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1514         /* Packet Complete Queue */
1515         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1516         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1517         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1518         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1519         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1520 
1521         /* Exception Queue */
1522         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1523         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1524         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1525                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1526         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1527         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1528  
1529         /* Load local copy of FREEQ and PCQ ptrs */
1530         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1531         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1532     iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1533     iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1534         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1535     iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1536     iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1537     iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1538     
1539         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1540               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1541               iadev->rfL.pcq_wr);)        
1542     /* just for check - no VP TBL */  
1543     /* VP Table */  
1544     /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1545     /* initialize VP Table for invalid VPIs  
1546         - I guess we can write all 1s or 0x000f in the entire memory  
1547           space or something similar.  
1548     */  
1549   
1550     /* This seems to work and looks right to me too !!! */  
1551         i =  REASS_TABLE * iadev->memSize;
1552     writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1553     /* initialize every Reassembly table entry to NO_AAL5_PKT */  
1554     reass_table = (u16 *)(iadev->reass_ram+i);  
1555         j = REASS_TABLE_SZ * iadev->memSize;
1556     for(i=0; i < j; i++)  
1557         *reass_table++ = NO_AAL5_PKT;  
1558        i = 8*1024;
1559        vcsize_sel =  0;
1560        while (i != iadev->num_vc) {
1561           i /= 2;
1562           vcsize_sel++;
1563        }
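       /* Worked example (sketch): starting from i = 8 * 1024, a board with
          num_vc == 1024 halves i three times, so vcsize_sel == 3; a 4K-VC
          board halves it once, giving vcsize_sel == 1. */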
1564        i = RX_VC_TABLE * iadev->memSize;
1565        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1566        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1567         j = RX_VC_TABLE_SZ * iadev->memSize;
1568     for(i = 0; i < j; i++)  
1569     {  
1570         /* shift the reassembly pointer by 3 plus the lower 3 bits of the
1571         vc_lkup_base register (= 3 for 1K VCs); the last byte holds
1572         those low 3 bits.
1573         To be programmed later.
1574         */  
1575         *vc_table = (i << 6) | 15;  /* for invalid VCI */  
1576         vc_table++;  
1577     }  
1578         /* ABR VC table */
1579         i =  ABR_VC_TABLE * iadev->memSize;
1580         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1581                    
1582         i = ABR_VC_TABLE * iadev->memSize;
1583     abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1584         j = REASS_TABLE_SZ * iadev->memSize;
1585         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1586         for(i = 0; i < j; i++) {        
1587         abr_vc_table->rdf = 0x0003;
1588                 abr_vc_table->air = 0x5eb1;
1589             abr_vc_table++;     
1590         }  
1591 
1592     /* Initialize other registers */  
1593   
1594     /* VP Filter Register set for VC Reassembly only */  
1595     writew(0xff00, iadev->reass_reg+VP_FILTER);  
1596         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1597     writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1598 
1599     /* Packet Timeout Count  related Registers : 
1600        Set packet timeout to occur in about 3 seconds
1601        Set Packet Aging Interval count register to overflow in about 4 us
1602     */  
1603         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1604 
1605         i = (j >> 6) & 0xFF;
1606         j += 2 * (j - 1);
1607         i |= ((j << 2) & 0xFF00);
1608         writew(i, iadev->reass_reg+TMOUT_RANGE);
1609 
1610         /* initialize the desc_tbl */
1611         for(i=0; i<iadev->num_tx_desc;i++)
1612             iadev->desc_tbl[i].timestamp = 0;
1613 
1614     /* to clear the interrupt status register - read it */  
1615     readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1616   
1617     /* Mask Register - clear it */  
1618     writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1619   
1620     skb_queue_head_init(&iadev->rx_dma_q);  
1621     iadev->rx_free_desc_qhead = NULL;   
1622 
1623     iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
1624     if (!iadev->rx_open) {
1625         printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
1626         dev->number);  
1627         goto err_free_dle;
1628     }  
1629 
1630         iadev->rxing = 1;
1631         iadev->rx_pkt_cnt = 0;
1632     /* Mode Register */  
1633     writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1634     return 0;  
1635 
1636 err_free_dle:
1637     dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1638               iadev->rx_dle_dma);
1639 err_out:
1640     return -ENOMEM;
1641 }  
1642   
1643 
1644 /*  
1645     The memory map suggested in appendix A and the coding for it.   
1646     Keeping it around just in case we change our mind later.  
1647   
1648         Buffer descr    0x0000 (128 - 4K)  
1649         UBR sched   0x1000 (1K - 4K)  
1650         UBR Wait q  0x2000 (1K - 4K)  
1651         Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1652                     (128 - 256) each  
1653         extended VC 0x4000 (1K - 8K)  
1654         ABR sched   0x6000  and ABR wait queue (1K - 2K) each  
1655         CBR sched   0x7000 (as needed)  
1656         VC table    0x8000 (1K - 32K)  
1657 */  
1658   
1659 static void tx_intr(struct atm_dev *dev)  
1660 {  
1661     IADEV *iadev;  
1662     unsigned short status;  
1663         unsigned long flags;
1664 
1665     iadev = INPH_IA_DEV(dev);  
1666   
1667     status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1668         if (status & TRANSMIT_DONE){
1669 
1670            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1671            spin_lock_irqsave(&iadev->tx_lock, flags);
1672            ia_tx_poll(iadev);
1673            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1674            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1675            if (iadev->close_pending)  
1676                wake_up(&iadev->close_wait);
1677         }         
1678     if (status & TCQ_NOT_EMPTY)  
1679     {  
1680         IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1681     }  
1682 }  
1683   
1684 static void tx_dle_intr(struct atm_dev *dev)
1685 {
1686         IADEV *iadev;
1687         struct dle *dle, *cur_dle; 
1688         struct sk_buff *skb;
1689         struct atm_vcc *vcc;
1690         struct ia_vcc  *iavcc;
1691         u_int dle_lp;
1692         unsigned long flags;
1693 
1694         iadev = INPH_IA_DEV(dev);
1695         spin_lock_irqsave(&iadev->tx_lock, flags);   
1696         dle = iadev->tx_dle_q.read;
1697         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1698                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1699         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
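        /* Note (assumption): dle_lp is the byte offset of the adapter's
           current position within the DLE ring; the ">> 4" turns it into a
           ring index on the assumption that sizeof(struct dle) == 16. */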
1700         while (dle != cur_dle)
1701         {
1702             /* free the DMAed skb */ 
1703             skb = skb_dequeue(&iadev->tx_dma_q); 
1704             if (!skb) break;
1705 
1706         /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1707         if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1708         dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
1709                  DMA_TO_DEVICE);
1710         }
1711             vcc = ATM_SKB(skb)->vcc;
1712             if (!vcc) {
1713                   printk("tx_dle_intr: vcc is null\n");
1714           spin_unlock_irqrestore(&iadev->tx_lock, flags);
1715                   dev_kfree_skb_any(skb);
1716 
1717                   return;
1718             }
1719             iavcc = INPH_IA_VCC(vcc);
1720             if (!iavcc) {
1721                   printk("tx_dle_intr: iavcc is null\n");
1722           spin_unlock_irqrestore(&iadev->tx_lock, flags);
1723                   dev_kfree_skb_any(skb);
1724                   return;
1725             }
1726             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1727                if ((vcc->pop) && (skb->len != 0))
1728                {     
1729                  vcc->pop(vcc, skb);
1730                } 
1731                else {
1732                  dev_kfree_skb_any(skb);
1733                }
1734             }
1735             else { /* Hold the rate-limited skb for flow control */
1736                IA_SKB_STATE(skb) |= IA_DLED;
1737                skb_queue_tail(&iavcc->txing_skb, skb);
1738             }
1739             IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%p\n", skb);)
1740             if (++dle == iadev->tx_dle_q.end)
1741                  dle = iadev->tx_dle_q.start;
1742         }
1743         iadev->tx_dle_q.read = dle;
1744         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1745 }
1746   
1747 static int open_tx(struct atm_vcc *vcc)  
1748 {  
1749     struct ia_vcc *ia_vcc;  
1750     IADEV *iadev;  
1751     struct main_vc *vc;  
1752     struct ext_vc *evc;  
1753         int ret;
1754     IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1755     if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1756     iadev = INPH_IA_DEV(vcc->dev);  
1757         
1758         if (iadev->phy_type & FE_25MBIT_PHY) {
1759            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1760                printk("IA: ABR not supported\n");
1761                return -EINVAL; 
1762            }
1763       if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1764                printk("IA: CBR not supported\n");
1765                return -EINVAL; 
1766           }
1767         }
1768         ia_vcc =  INPH_IA_VCC(vcc);
1769         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1770         if (vcc->qos.txtp.max_sdu > 
1771                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1772            printk("IA: SDU size %d exceeds the configured SDU size %d\n",
1773           vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1774        vcc->dev_data = NULL;
1775            kfree(ia_vcc);
1776            return -EINVAL; 
1777         }
1778     ia_vcc->vc_desc_cnt = 0;
1779         ia_vcc->txing = 1;
1780 
1781         /* find pcr */
1782         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1783            vcc->qos.txtp.pcr = iadev->LineRate;
1784         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1785            vcc->qos.txtp.pcr = iadev->LineRate;
1786         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1787            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1788         if (vcc->qos.txtp.pcr > iadev->LineRate)
1789              vcc->qos.txtp.pcr = iadev->LineRate;
1790         ia_vcc->pcr = vcc->qos.txtp.pcr;
1791 
1792         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1793         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1794         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1795         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
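        /* Worked example (sketch): assuming HZ == 100 and an OC-3 line rate
           of roughly 353,000 cells/s, a VC with pcr = 1000 cells/s takes the
           last branch and gets ltimeout = 2700 * HZ / 1000, i.e. about 2.7
           seconds, while a near-line-rate VC only waits HZ / 10. */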
1796         if (ia_vcc->pcr < iadev->rate_limit)
1797            skb_queue_head_init (&ia_vcc->txing_skb);
1798         if (ia_vcc->pcr < iadev->rate_limit) {
1799        struct sock *sk = sk_atm(vcc);
1800 
1801        if (vcc->qos.txtp.max_sdu != 0) {
1802                if (ia_vcc->pcr > 60000)
1803                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1804                else if (ia_vcc->pcr > 2000)
1805                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1806                else
1807                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1808            }
1809            else
1810              sk->sk_sndbuf = 24576;
1811         }
1812            
1813     vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1814     evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1815     vc += vcc->vci;  
1816     evc += vcc->vci;  
1817     memset((caddr_t)vc, 0, sizeof(*vc));  
1818     memset((caddr_t)evc, 0, sizeof(*evc));  
1819       
1820     /* store the most significant 4 bits of vci as the last 4 bits   
1821         of first part of atm header.  
1822        store the last 12 bits of vci as first 12 bits of the second  
1823         part of the atm header.  
1824     */  
1825     evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1826     evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
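    /* Example (sketch): for vci = 0x1234 this stores atm_hdr1 = 0x1 (the
       top 4 bits of the VCI) and atm_hdr2 = 0x2340 (the low 12 bits shifted
       into the upper 12 bits of the halfword). */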
1827  
1828     /* check the following for different traffic classes */  
1829     if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1830     {  
1831         vc->type = UBR;  
1832                 vc->status = CRC_APPEND;
1833         vc->acr = cellrate_to_float(iadev->LineRate);  
1834                 if (vcc->qos.txtp.pcr > 0) 
1835                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1836                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1837                                              vcc->qos.txtp.max_pcr,vc->acr);)
1838     }  
1839     else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1840     {       srv_cls_param_t srv_p;
1841         IF_ABR(printk("Tx ABR VCC\n");)  
1842                 init_abr_vc(iadev, &srv_p);
1843                 if (vcc->qos.txtp.pcr > 0) 
1844                    srv_p.pcr = vcc->qos.txtp.pcr;
1845                 if (vcc->qos.txtp.min_pcr > 0) {
1846                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1847                    if (tmpsum > iadev->LineRate)
1848                        return -EBUSY;
1849                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1850                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1851                 } 
1852                 else srv_p.mcr = 0;
1853                 if (vcc->qos.txtp.icr)
1854                    srv_p.icr = vcc->qos.txtp.icr;
1855                 if (vcc->qos.txtp.tbe)
1856                    srv_p.tbe = vcc->qos.txtp.tbe;
1857                 if (vcc->qos.txtp.frtt)
1858                    srv_p.frtt = vcc->qos.txtp.frtt;
1859                 if (vcc->qos.txtp.rif)
1860                    srv_p.rif = vcc->qos.txtp.rif;
1861                 if (vcc->qos.txtp.rdf)
1862                    srv_p.rdf = vcc->qos.txtp.rdf;
1863                 if (vcc->qos.txtp.nrm_pres)
1864                    srv_p.nrm = vcc->qos.txtp.nrm;
1865                 if (vcc->qos.txtp.trm_pres)
1866                    srv_p.trm = vcc->qos.txtp.trm;
1867                 if (vcc->qos.txtp.adtf_pres)
1868                    srv_p.adtf = vcc->qos.txtp.adtf;
1869                 if (vcc->qos.txtp.cdf_pres)
1870                    srv_p.cdf = vcc->qos.txtp.cdf;    
1871                 if (srv_p.icr > srv_p.pcr)
1872                    srv_p.icr = srv_p.pcr;    
1873                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1874                                                       srv_p.pcr, srv_p.mcr);)
1875         ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1876     } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1877                 if (iadev->phy_type & FE_25MBIT_PHY) {
1878                     printk("IA: CBR not supported\n");
1879                     return -EINVAL; 
1880                 }
1881                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1882                    IF_CBR(printk("PCR is not available\n");)
1883                    return -1;
1884                 }
1885                 vc->type = CBR;
1886                 vc->status = CRC_APPEND;
1887                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1888                     return ret;
1889                 }
1890     } else {
1891         printk("iadev: traffic other than UBR, ABR and CBR is not supported\n");
1892     }
1893         
1894         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1895     IF_EVENT(printk("ia open_tx returning \n");)  
1896     return 0;  
1897 }  
1898   
1899   
1900 static int tx_init(struct atm_dev *dev)  
1901 {  
1902     IADEV *iadev;  
1903     struct tx_buf_desc *buf_desc_ptr;
1904     unsigned int tx_pkt_start;  
1905     void *dle_addr;  
1906     int i;  
1907     u_short tcq_st_adr;  
1908     u_short *tcq_start;  
1909     u_short prq_st_adr;  
1910     u_short *prq_start;  
1911     struct main_vc *vc;  
1912     struct ext_vc *evc;   
1913         u_short tmp16;
1914         u32 vcsize_sel;
1915  
1916     iadev = INPH_IA_DEV(dev);  
1917         spin_lock_init(&iadev->tx_lock);
1918  
1919     IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1920                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1921 
1922     /* Allocate 4k (boundary aligned) bytes */
1923     dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1924                       &iadev->tx_dle_dma, GFP_KERNEL);
1925     if (!dle_addr)  {
1926         printk(KERN_ERR DEV_LABEL ": can't allocate DLEs\n");
1927         goto err_out;
1928     }
1929     iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1930     iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1931     iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1932     iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1933 
1934     /* write the upper 20 bits of the start address to tx list address register */  
1935     writel(iadev->tx_dle_dma & 0xfffff000,
1936            iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1937     writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1938     writew(0, iadev->seg_reg+MODE_REG_0);  
1939     writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1940         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1941         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1942         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1943   
1944     /*  
1945        Transmit side control memory map  
1946        --------------------------------    
1947      Buffer descr   0x0000 (128 - 4K)  
1948      Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1949                     (512 - 1K) each  
1950                     TCQ - 4K, PRQ - 5K  
1951      CBR Table  0x1800 (as needed) - 6K  
1952      UBR Table  0x3000 (1K - 4K) - 12K  
1953      UBR Wait queue 0x4000 (1K - 4K) - 16K  
1954      ABR sched  0x5000  and ABR wait queue (1K - 2K) each  
1955                 ABR Tbl - 20K, ABR Wq - 22K   
1956      extended VC    0x6000 (1K - 8K) - 24K  
1957      VC Table   0x8000 (1K - 32K) - 32K  
1958       
1959     Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1960     and Wait q, which can be allotted later.  
1961     */  
1962      
1963     /* Buffer Descriptor Table Base address */  
1964     writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1965   
1966     /* initialize each entry in the buffer descriptor table */  
1967     buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1968     memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1969     buf_desc_ptr++;  
1970     tx_pkt_start = TX_PACKET_RAM;  
1971     for(i=1; i<=iadev->num_tx_desc; i++)  
1972     {  
1973         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1974         buf_desc_ptr->desc_mode = AAL5;  
1975         buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1976         buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1977         buf_desc_ptr++;       
1978         tx_pkt_start += iadev->tx_buf_sz;  
1979     }  
1980     iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
1981                       sizeof(*iadev->tx_buf),
1982                       GFP_KERNEL);
1983         if (!iadev->tx_buf) {
1984             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1985         goto err_free_dle;
1986         }
1987         for (i= 0; i< iadev->num_tx_desc; i++)
1988         {
1989         struct cpcs_trailer *cpcs;
1990  
1991             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1992             if(!cpcs) {                
1993         printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1994         goto err_free_tx_bufs;
1995             }
1996         iadev->tx_buf[i].cpcs = cpcs;
1997         iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
1998                                cpcs,
1999                                sizeof(*cpcs),
2000                                DMA_TO_DEVICE);
2001         }
2002     iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
2003                     sizeof(*iadev->desc_tbl),
2004                     GFP_KERNEL);
2005     if (!iadev->desc_tbl) {
2006         printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2007         goto err_free_all_tx_bufs;
2008     }
2009   
2010     /* Communication Queues base address */  
2011         i = TX_COMP_Q * iadev->memSize;
2012     writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2013   
2014     /* Transmit Complete Queue */  
2015     writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2016     writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2017     writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2018     iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2019         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2020                                               iadev->seg_reg+TCQ_ED_ADR); 
2021     /* Fill the TCQ with all the free descriptors. */  
2022     tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2023     tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2024     for(i=1; i<=iadev->num_tx_desc; i++)  
2025     {  
2026         *tcq_start = (u_short)i;  
2027         tcq_start++;  
2028     }  
2029   
2030     /* Packet Ready Queue */  
2031         i = PKT_RDY_Q * iadev->memSize; 
2032     writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2033     writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2034                                               iadev->seg_reg+PRQ_ED_ADR);
2035     writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2036     writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2037      
2038         /* Load local copy of PRQ and TCQ ptrs */
2039         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2040     iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2041     iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2042 
2043     iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2044     iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2045     iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2046 
2047     /* Just for safety initializing the queue to have desc 1 always */  
2048     /* Fill the PRQ with all the free descriptors. */  
2049     prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2050     prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2051     for(i=1; i<=iadev->num_tx_desc; i++)  
2052     {  
2053         *prq_start = (u_short)0;    /* desc 1 in all entries */  
2054         prq_start++;  
2055     }  
2056     /* CBR Table */  
2057         IF_INIT(printk("Start CBR Init\n");)
2058 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2059         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2060 #else /* Charlie's logic is wrong ? */
2061         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2062         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2063         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2064 #endif
2065 
2066         IF_INIT(printk("value in register = 0x%x\n",
2067                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2068         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2069         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2070         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2071                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2072         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2073         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2074         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2075         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2076                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2077         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2078           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2079           readw(iadev->seg_reg+CBR_TAB_END+1));)
2080 
2081         /* Initialize the CBR Scheduling Table */
2082         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2083                                                           0, iadev->num_vc*6); 
2084         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2085         iadev->CbrEntryPt = 0;
2086         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2087         iadev->NumEnabledCBR = 0;
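        /* Example (sketch): a 1K-VC board gets CbrTotEntries = 3 * 1024 =
           3072 schedule slots, so each slot corresponds to MAX_ATM_155 / 3072
           cells/s of CBR bandwidth (the Granularity computed above). */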
2088 
2089     /* UBR scheduling Table and wait queue */  
2090     /* initialize all bytes of UBR scheduler table and wait queue to 0   
2091         - SCHEDSZ is 1K (# of entries).  
2092         - UBR Table size is 4K  
2093         - UBR wait queue is 4K  
2094        since the table and wait queues are contiguous, all the bytes   
2095        can be initialized by one memset.
2096     */  
2097         
2098         vcsize_sel = 0;
2099         i = 8*1024;
2100         while (i != iadev->num_vc) {
2101           i /= 2;
2102           vcsize_sel++;
2103         }
2104  
2105         i = MAIN_VC_TABLE * iadev->memSize;
2106         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2107         i =  EXT_VC_TABLE * iadev->memSize;
2108         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2109         i = UBR_SCHED_TABLE * iadev->memSize;
2110         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2111         i = UBR_WAIT_Q * iadev->memSize; 
2112         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2113     memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2114                                                        0, iadev->num_vc*8);
2115     /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2116     /* initialize all bytes of ABR scheduler table and wait queue to 0   
2117         - SCHEDSZ is 1K (# of entries).  
2118         - ABR Table size is 2K  
2119         - ABR wait queue is 2K  
2120        since the table and wait queues are contiguous, all the bytes   
2121        can be initialized by one memset.
2122     */  
2123         i = ABR_SCHED_TABLE * iadev->memSize;
2124         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2125         i = ABR_WAIT_Q * iadev->memSize;
2126         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2127  
2128         i = ABR_SCHED_TABLE*iadev->memSize;
2129     memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2130     vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2131     evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2132     iadev->testTable = kmalloc_array(iadev->num_vc,
2133                      sizeof(*iadev->testTable),
2134                      GFP_KERNEL);
2135         if (!iadev->testTable) {
2136            printk("Get freepage failed\n");
2137        goto err_free_desc_tbl;
2138         }
2139     for(i=0; i<iadev->num_vc; i++)  
2140     {  
2141         memset((caddr_t)vc, 0, sizeof(*vc));  
2142         memset((caddr_t)evc, 0, sizeof(*evc));  
2143                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2144                         GFP_KERNEL);
2145         if (!iadev->testTable[i])
2146             goto err_free_test_tables;
2147                 iadev->testTable[i]->lastTime = 0;
2148         iadev->testTable[i]->fract = 0;
2149                 iadev->testTable[i]->vc_status = VC_UBR;
2150         vc++;  
2151         evc++;  
2152     }  
2153   
2154     /* Other Initialization */  
2155       
2156     /* Max Rate Register */  
2157         if (iadev->phy_type & FE_25MBIT_PHY) {
2158        writew(RATE25, iadev->seg_reg+MAXRATE);  
2159        writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2160         }
2161         else {
2162        writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2163        writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2164         }
2165     /* Set Idle Header Registers to be sure */  
2166     writew(0, iadev->seg_reg+IDLEHEADHI);  
2167     writew(0, iadev->seg_reg+IDLEHEADLO);  
2168   
2169     /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2170         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2171 
2172         iadev->close_pending = 0;
2173         init_waitqueue_head(&iadev->close_wait);
2174         init_waitqueue_head(&iadev->timeout_wait);
2175     skb_queue_head_init(&iadev->tx_dma_q);  
2176     ia_init_rtn_q(&iadev->tx_return_q);  
2177 
2178     /* RM Cell Protocol ID and Message Type */  
2179     writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2180         skb_queue_head_init (&iadev->tx_backlog);
2181   
2182     /* Mode Register 1 */  
2183     writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2184   
2185     /* Mode Register 0 */  
2186     writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2187   
2188     /* Interrupt Status Register - read to clear */  
2189     readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2190   
2191     /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2192         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2193         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2194         iadev->tx_pkt_cnt = 0;
2195         iadev->rate_limit = iadev->LineRate / 3;
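        /* Note: rate_limit is the flow-control threshold used by open_tx()
           and tx_dle_intr(); VCs whose pcr is below one third of the line
           rate keep their transmitted skbs on txing_skb until the DMA has
           completed instead of popping them immediately. */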
2196   
2197     return 0;
2198 
2199 err_free_test_tables:
2200     while (--i >= 0)
2201         kfree(iadev->testTable[i]);
2202     kfree(iadev->testTable);
2203 err_free_desc_tbl:
2204     kfree(iadev->desc_tbl);
2205 err_free_all_tx_bufs:
2206     i = iadev->num_tx_desc;
2207 err_free_tx_bufs:
2208     while (--i >= 0) {
2209         struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2210 
2211         dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2212                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2213         kfree(desc->cpcs);
2214     }
2215     kfree(iadev->tx_buf);
2216 err_free_dle:
2217     dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2218               iadev->tx_dle_dma);
2219 err_out:
2220     return -ENOMEM;
2221 }   
2222    
2223 static irqreturn_t ia_int(int irq, void *dev_id)  
2224 {  
2225    struct atm_dev *dev;  
2226    IADEV *iadev;  
2227    unsigned int status;  
2228    int handled = 0;
2229 
2230    dev = dev_id;  
2231    iadev = INPH_IA_DEV(dev);  
2232    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2233    { 
2234     handled = 1;
2235         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2236     if (status & STAT_REASSINT)  
2237     {  
2238        /* do something */  
2239        IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2240        rx_intr(dev);  
2241     }  
2242     if (status & STAT_DLERINT)  
2243     {  
2244        /* Clear this bit by writing a 1 to it. */  
2245        writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2246        rx_dle_intr(dev);  
2247     }  
2248     if (status & STAT_SEGINT)  
2249     {  
2250        /* do something */ 
2251            IF_EVENT(printk("IA: tx_intr \n");) 
2252        tx_intr(dev);  
2253     }  
2254     if (status & STAT_DLETINT)  
2255     {  
2256        writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2257        tx_dle_intr(dev);  
2258     }  
2259     if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2260     {  
2261            if (status & STAT_FEINT) 
2262                ia_frontend_intr(iadev);
2263     }  
2264    }
2265    return IRQ_RETVAL(handled);
2266 }  
2267       
2268       
2269       
2270 /*----------------------------- entries --------------------------------*/  
2271 static int get_esi(struct atm_dev *dev)  
2272 {  
2273     IADEV *iadev;  
2274     int i;  
2275     u32 mac1;  
2276     u16 mac2;  
2277       
2278     iadev = INPH_IA_DEV(dev);  
2279     mac1 = cpu_to_be32(le32_to_cpu(readl(  
2280                 iadev->reg+IPHASE5575_MAC1)));  
2281     mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2282     IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2283     for (i=0; i<MAC1_LEN; i++)  
2284         dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2285       
2286     for (i=0; i<MAC2_LEN; i++)  
2287         dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
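    /* Example (sketch, assuming MAC1_LEN == 4 and MAC2_LEN == 2): with
       mac1 = 0x00204812 and mac2 = 0x3456 the ESI comes out as
       00:20:48:12:34:56. */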
2288     return 0;  
2289 }  
2290       
2291 static int reset_sar(struct atm_dev *dev)  
2292 {  
2293     IADEV *iadev;  
2294     int i, error = 1;  
2295     unsigned int pci[64];  
2296       
2297     iadev = INPH_IA_DEV(dev);  
2298     for(i=0; i<64; i++)  
2299       if ((error = pci_read_config_dword(iadev->pci,  
2300                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2301           return error;  
2302     writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2303     for(i=0; i<64; i++)  
2304       if ((error = pci_write_config_dword(iadev->pci,  
2305                     i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2306         return error;  
2307     udelay(5);  
2308     return 0;  
2309 }  
2310       
2311       
2312 static int ia_init(struct atm_dev *dev)
2313 {  
2314     IADEV *iadev;  
2315     unsigned long real_base;
2316     void __iomem *base;
2317     unsigned short command;  
2318     int error, i; 
2319       
2320     /* The device has been identified and registered. Now we read   
2321        necessary configuration info like memory base address,   
2322        interrupt number etc */  
2323       
2324     IF_INIT(printk(">ia_init\n");)  
2325     dev->ci_range.vpi_bits = 0;  
2326     dev->ci_range.vci_bits = NR_VCI_LD;  
2327 
2328     iadev = INPH_IA_DEV(dev);  
2329     real_base = pci_resource_start (iadev->pci, 0);
2330     iadev->irq = iadev->pci->irq;
2331           
2332     error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2333     if (error) {
2334         printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2335                 dev->number,error);  
2336         return -EINVAL;  
2337     }  
2338     IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2339             dev->number, iadev->pci->revision, real_base, iadev->irq);)
2340       
2341     /* find mapping size of board */  
2342       
2343     iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2344 
2345         if (iadev->pci_map_size == 0x100000){
2346           iadev->num_vc = 4096;
2347       dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2348           iadev->memSize = 4;
2349         }
2350         else if (iadev->pci_map_size == 0x40000) {
2351           iadev->num_vc = 1024;
2352           iadev->memSize = 1;
2353         }
2354         else {
2355            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2356            return -EINVAL;
2357         }
2358     IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2359       
2360     /* enable bus mastering */
2361     pci_set_master(iadev->pci);
2362 
2363     /*  
2364      * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2365      */  
2366     udelay(10);  
2367       
2368     /* mapping the physical address to a virtual address in address space */  
2369     base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2370       
2371     if (!base)  
2372     {  
2373         printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2374                 dev->number);  
2375         return -ENOMEM;
2376     }  
2377     IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2378             dev->number, iadev->pci->revision, base, iadev->irq);)
2379       
2380     /* filling the iphase dev structure */  
2381     iadev->mem = iadev->pci_map_size /2;  
2382     iadev->real_base = real_base;  
2383     iadev->base = base;  
2384           
2385     /* Bus Interface Control Registers */  
2386     iadev->reg = base + REG_BASE;
2387     /* Segmentation Control Registers */  
2388     iadev->seg_reg = base + SEG_BASE;
2389     /* Reassembly Control Registers */  
2390     iadev->reass_reg = base + REASS_BASE;  
2391     /* Front end/ DMA control registers */  
2392     iadev->phy = base + PHY_BASE;  
2393     iadev->dma = base + PHY_BASE;  
2394     /* RAM - Segmentation RAM and Reassembly RAM */  
2395     iadev->ram = base + ACTUAL_RAM_BASE;  
2396     iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2397     iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2398   
2399     /* let's print out the above */  
2400     IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2401           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2402           iadev->phy, iadev->ram, iadev->seg_ram, 
2403           iadev->reass_ram);) 
2404       
2405     /* let's try reading the MAC address */  
2406     error = get_esi(dev);  
2407     if (error) {
2408       iounmap(iadev->base);
2409       return error;  
2410     }
2411         printk("IA: ");
2412     for (i=0; i < ESI_LEN; i++)  
2413                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2414         printk("\n");  
2415   
2416         /* reset SAR */  
2417         if (reset_sar(dev)) {
2418        iounmap(iadev->base);
2419            printk("IA: SAR reset failed, please try again\n");
2420            return 1;
2421         }
2422     return 0;  
2423 }  
2424 
2425 static void ia_update_stats(IADEV *iadev) {
2426     if (!iadev->carrier_detect)
2427         return;
2428     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2429     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2430     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2431     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2432     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2433     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2434     return;
2435 }
2436   
2437 static void ia_led_timer(struct timer_list *unused) {
2438     unsigned long flags;
2439     static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2440         u_char i;
2441         static u32 ctrl_reg; 
2442         for (i = 0; i < iadev_count; i++) {
2443            if (ia_dev[i]) {
2444           ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2445           if (blinking[i] == 0) {
2446          blinking[i]++;
2447                  ctrl_reg &= (~CTRL_LED);
2448                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2449                  ia_update_stats(ia_dev[i]);
2450               }
2451               else {
2452          blinking[i] = 0;
2453          ctrl_reg |= CTRL_LED;
2454                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2455                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2456                  if (ia_dev[i]->close_pending)  
2457                     wake_up(&ia_dev[i]->close_wait);
2458                  ia_tx_poll(ia_dev[i]);
2459                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2460               }
2461            }
2462         }
2463     mod_timer(&ia_timer, jiffies + HZ / 4);
2464     return;
2465 }
2466 
2467 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2468     unsigned long addr)  
2469 {  
2470     writel(value, INPH_IA_DEV(dev)->phy+addr);  
2471 }  
2472   
2473 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2474 {  
2475     return readl(INPH_IA_DEV(dev)->phy+addr);  
2476 }  
2477 
2478 static void ia_free_tx(IADEV *iadev)
2479 {
2480     int i;
2481 
2482     kfree(iadev->desc_tbl);
2483     for (i = 0; i < iadev->num_vc; i++)
2484         kfree(iadev->testTable[i]);
2485     kfree(iadev->testTable);
2486     for (i = 0; i < iadev->num_tx_desc; i++) {
2487         struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2488 
2489         dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2490                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2491         kfree(desc->cpcs);
2492     }
2493     kfree(iadev->tx_buf);
2494     dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2495               iadev->tx_dle_dma);
2496 }
2497 
2498 static void ia_free_rx(IADEV *iadev)
2499 {
2500     kfree(iadev->rx_open);
2501     dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2502               iadev->rx_dle_dma);
2503 }
2504 
2505 static int ia_start(struct atm_dev *dev)
2506 {  
2507     IADEV *iadev;  
2508     int error;  
2509     unsigned char phy;  
2510     u32 ctrl_reg;  
2511     IF_EVENT(printk(">ia_start\n");)  
2512     iadev = INPH_IA_DEV(dev);  
2513         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2514                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2515                     dev->number, iadev->irq);  
2516         error = -EAGAIN;
2517         goto err_out;
2518         }  
2519         /* @@@ should release IRQ on error */  
2520     /* enabling memory + master */  
2521         if ((error = pci_write_config_word(iadev->pci,   
2522                 PCI_COMMAND,   
2523                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2524     {  
2525                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2526                     "master (0x%x)\n",dev->number, error);  
2527         error = -EIO;  
2528         goto err_free_irq;
2529         }  
2530     udelay(10);  
2531   
2532     /* Maybe we should reset the front end, initialize Bus Interface Control   
2533         Registers and see. */  
2534   
2535     IF_INIT(printk("Bus ctrl reg: %08x\n", 
2536                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2537     ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2538     ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2539             | CTRL_B8  
2540             | CTRL_B16  
2541             | CTRL_B32  
2542             | CTRL_B48  
2543             | CTRL_B64  
2544             | CTRL_B128  
2545             | CTRL_ERRMASK  
2546             | CTRL_DLETMASK     /* should be removed later */  
2547             | CTRL_DLERMASK  
2548             | CTRL_SEGMASK  
2549             | CTRL_REASSMASK      
2550             | CTRL_FEMASK  
2551             | CTRL_CSPREEMPT;  
2552   
2553        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2554   
2555     IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2556                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2557        printk("Bus status reg after init: %08x\n", 
2558                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2559     
2560         ia_hw_type(iadev); 
2561     error = tx_init(dev);  
2562     if (error)
2563         goto err_free_irq;
2564     error = rx_init(dev);  
2565     if (error)
2566         goto err_free_tx;
2567   
2568     ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2569         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2570     IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2571                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2572         phy = 0; /* resolve compiler complaint */
2573         IF_INIT ( 
2574     if ((phy=ia_phy_get(dev,0)) == 0x30)  
2575         printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2576     else  
2577         printk("IA: utopia,rev.%0x\n",phy);) 
2578 
2579     if (iadev->phy_type &  FE_25MBIT_PHY)
2580            ia_mb25_init(iadev);
2581     else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2582            ia_suni_pm7345_init(iadev);
2583     else {
2584         error = suni_init(dev);
2585         if (error)
2586             goto err_free_rx;
2587         if (dev->phy->start) {
2588             error = dev->phy->start(dev);
2589             if (error)
2590                 goto err_free_rx;
2591         }
2592         /* Get iadev->carrier_detect status */
2593         ia_frontend_intr(iadev);
2594     }
2595     return 0;
2596 
2597 err_free_rx:
2598     ia_free_rx(iadev);
2599 err_free_tx:
2600     ia_free_tx(iadev);
2601 err_free_irq:
2602     free_irq(iadev->irq, dev);  
2603 err_out:
2604     return error;
2605 }  
2606   
2607 static void ia_close(struct atm_vcc *vcc)  
2608 {
2609     DEFINE_WAIT(wait);
2610         u16 *vc_table;
2611         IADEV *iadev;
2612         struct ia_vcc *ia_vcc;
2613         struct sk_buff *skb = NULL;
2614         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2615         unsigned long closetime, flags;
2616 
2617         iadev = INPH_IA_DEV(vcc->dev);
2618         ia_vcc = INPH_IA_VCC(vcc);
2619     if (!ia_vcc) return;  
2620 
2621         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2622                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2623     clear_bit(ATM_VF_READY,&vcc->flags);
2624         skb_queue_head_init (&tmp_tx_backlog);
2625         skb_queue_head_init (&tmp_vcc_backlog); 
2626         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2627            iadev->close_pending++;
2628        prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2629        schedule_timeout(msecs_to_jiffies(500));
2630        finish_wait(&iadev->timeout_wait, &wait);
2631            spin_lock_irqsave(&iadev->tx_lock, flags); 
2632            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2633               if (ATM_SKB(skb)->vcc == vcc){ 
2634                  if (vcc->pop) vcc->pop(vcc, skb);
2635                  else dev_kfree_skb_any(skb);
2636               }
2637               else 
2638                  skb_queue_tail(&tmp_tx_backlog, skb);
2639            } 
2640            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2641              skb_queue_tail(&iadev->tx_backlog, skb);
2642            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2643            closetime = 300000 / ia_vcc->pcr;
2644            if (closetime == 0)
2645               closetime = 1;
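           /* Example (sketch): a VC with pcr = 10000 cells/s waits up to
              300000 / 10000 = 30 jiffies here for its descriptors to drain. */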
2646            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2647            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2648            spin_lock_irqsave(&iadev->tx_lock, flags);
2649            iadev->close_pending--;
2650            iadev->testTable[vcc->vci]->lastTime = 0;
2651            iadev->testTable[vcc->vci]->fract = 0; 
2652            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2653            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2654               if (vcc->qos.txtp.min_pcr > 0)
2655                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2656            }
2657            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2658               ia_vcc = INPH_IA_VCC(vcc); 
2659               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2660               ia_cbrVc_close (vcc);
2661            }
2662            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2663         }
2664         
2665         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2666            // reset reass table
2667            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2668            vc_table += vcc->vci; 
2669            *vc_table = NO_AAL5_PKT;
2670            // reset vc table
2671            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2672            vc_table += vcc->vci;
2673            *vc_table = (vcc->vci << 6) | 15;
2674            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2675               struct abr_vc_table __iomem *abr_vc_table = 
2676                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2677               abr_vc_table +=  vcc->vci;
2678               abr_vc_table->rdf = 0x0003;
2679               abr_vc_table->air = 0x5eb1;
2680            }                                 
2681            // Drain the packets
2682            rx_dle_intr(vcc->dev); 
2683            iadev->rx_open[vcc->vci] = NULL;
2684         }
2685     kfree(INPH_IA_VCC(vcc));  
2686         ia_vcc = NULL;
2687         vcc->dev_data = NULL;
2688         clear_bit(ATM_VF_ADDR,&vcc->flags);
2689         return;        
2690 }  
2691   
2692 static int ia_open(struct atm_vcc *vcc)
2693 {  
2694     struct ia_vcc *ia_vcc;  
2695     int error;  
2696     if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2697     {  
2698         IF_EVENT(printk("ia: no partially allocated resources\n");)  
2699         vcc->dev_data = NULL;
2700     }  
2701     if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2702     {  
2703         IF_EVENT(printk("iphase open: unspec part\n");)  
2704         set_bit(ATM_VF_ADDR,&vcc->flags);
2705     }  
2706     if (vcc->qos.aal != ATM_AAL5)  
2707         return -EINVAL;  
2708     IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2709                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2710   
2711     /* Device dependent initialization */  
2712     ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2713     if (!ia_vcc) return -ENOMEM;  
2714     vcc->dev_data = ia_vcc;
2715   
2716     if ((error = open_rx(vcc)))  
2717     {  
2718         IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2719         ia_close(vcc);  
2720         return error;  
2721     }  
2722   
2723     if ((error = open_tx(vcc)))  
2724     {  
2725         IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2726         ia_close(vcc);  
2727         return error;  
2728     }  
2729   
2730     set_bit(ATM_VF_READY,&vcc->flags);
2731 
2732 #if 0
2733         {
2734            static u8 first = 1; 
2735            if (first) {
2736               ia_timer.expires = jiffies + 3*HZ;
2737               add_timer(&ia_timer);
2738               first = 0;
2739            }           
2740         }
2741 #endif
2742     IF_EVENT(printk("ia open returning\n");)  
2743     return 0;  
2744 }  
2745   
2746 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2747 {  
2748     IF_EVENT(printk(">ia_change_qos\n");)  
2749     return 0;  
2750 }  
2751   
2752 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2753 {  
2754    IA_CMDBUF ia_cmds;
2755    IADEV *iadev;
2756    int i, board;
2757    u16 __user *tmps;
2758    IF_EVENT(printk(">ia_ioctl\n");)  
2759    if (cmd != IA_CMD) {
2760       if (!dev->phy->ioctl) return -EINVAL;
2761       return dev->phy->ioctl(dev,cmd,arg);
2762    }
2763    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2764    board = ia_cmds.status;
2765 
2766     if ((board < 0) || (board > iadev_count))
2767         board = 0;
2768     board = array_index_nospec(board, iadev_count + 1);
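    /* array_index_nospec() clamps the user-supplied index under speculation,
       so the ia_dev[board] lookup below cannot be turned into a speculative
       out-of-bounds read (Spectre v1 hardening). */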
2769 
2770    iadev = ia_dev[board];
2771    switch (ia_cmds.cmd) {
2772    case MEMDUMP:
2773    {
2774     switch (ia_cmds.sub_cmd) {
2775           case MEMDUMP_SEGREG:
2776          if (!capable(CAP_NET_ADMIN)) return -EPERM;
2777              tmps = (u16 __user *)ia_cmds.buf;
2778              for(i=0; i<0x80; i+=2, tmps++)
2779                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2780              ia_cmds.status = 0;
2781              ia_cmds.len = 0x80;
2782              break;
2783           case MEMDUMP_REASSREG:
2784          if (!capable(CAP_NET_ADMIN)) return -EPERM;
2785              tmps = (u16 __user *)ia_cmds.buf;
2786              for(i=0; i<0x80; i+=2, tmps++)
2787                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2788              ia_cmds.status = 0;
2789              ia_cmds.len = 0x80;
2790              break;
2791           case MEMDUMP_FFL:
2792           {  
2793              ia_regs_t       *regs_local;
2794              ffredn_t        *ffL;
2795              rfredn_t        *rfL;
2796                      
2797          if (!capable(CAP_NET_ADMIN)) return -EPERM;
2798          regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2799          if (!regs_local) return -ENOMEM;
2800          ffL = &regs_local->ffredn;
2801          rfL = &regs_local->rfredn;
2802              /* Copy real rfred registers into the local copy */
2803          for (i=0; i<(sizeof (rfredn_t))/4; i++)
2804                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2805                 /* Copy real ffred registers into the local copy */
2806          for (i=0; i<(sizeof (ffredn_t))/4; i++)
2807                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2808 
2809              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2810                 kfree(regs_local);
2811                 return -EFAULT;
2812              }
2813              kfree(regs_local);
2814              printk("Board %d registers dumped\n", board);
2815              ia_cmds.status = 0;                  
2816      }  
2817              break;        
2818          case READ_REG:
2819          {  
2820          if (!capable(CAP_NET_ADMIN)) return -EPERM;
2821              desc_dbg(iadev); 
2822              ia_cmds.status = 0; 
2823          }
2824              break;
2825          case 0x6:
2826          {  
2827              ia_cmds.status = 0; 
2828              printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2829              printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2830          }
2831              break;
2832          case 0x8:
2833          {
2834              struct k_sonet_stats *stats;
2835              stats = &PRIV(_ia_dev[board])->sonet_stats;
2836              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2837              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2838              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2839              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2840              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2841              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2842              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2843              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2844              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2845          }
2846             ia_cmds.status = 0;
2847             break;
2848          case 0x9:
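            /* Restart the receive side: hand every Rx descriptor back to the
               free queue, reprogram the reassembly interrupt mask and mark
               the reassembly unit as receiving again. */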
2849         if (!capable(CAP_NET_ADMIN)) return -EPERM;
2850             for (i = 1; i <= iadev->num_rx_desc; i++)
2851                free_desc(_ia_dev[board], i);
2852             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2853                                             iadev->reass_reg+REASS_MASK_REG);
2854             iadev->rxing = 1;
2855             
2856             ia_cmds.status = 0;
2857             break;
2858 
2859          case 0xb:
2860         if (!capable(CAP_NET_ADMIN)) return -EPERM;
2861             ia_frontend_intr(iadev);
2862             break;
2863          case 0xa:
2864         if (!capable(CAP_NET_ADMIN)) return -EPERM;
2865          {  
2866              ia_cmds.status = 0; 
2867              IADebugFlag = ia_cmds.maddr;
2868              printk("New debug option loaded\n");
2869          }
2870              break;
2871          default:
2872              ia_cmds.status = 0;
2873              break;
2874       } 
2875    }
2876       break;
2877    default:
2878       break;
2879 
2880    }    
2881    return 0;  
2882 }  
2883   
2884 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2885         IADEV *iadev;
2886         struct dle *wr_ptr;
2887         struct tx_buf_desc __iomem *buf_desc_ptr;
2888         int desc;
2889         int comp_code;
2890         int total_len;
2891         struct cpcs_trailer *trailer;
2892         struct ia_vcc *iavcc;
2893 
2894         iadev = INPH_IA_DEV(vcc->dev);  
2895         iavcc = INPH_IA_VCC(vcc);
2896         if (!iavcc->txing) {
2897            printk("discard packet on closed VC\n");
2898            if (vcc->pop)
2899         vcc->pop(vcc, skb);
2900            else
2901         dev_kfree_skb_any(skb);
2902        return 0;
2903         }
2904 
2905         if (skb->len > iadev->tx_buf_sz - 8) {
2906            printk("Transmit size exceeds tx buffer size\n");
2907            if (vcc->pop)
2908                  vcc->pop(vcc, skb);
2909            else
2910                  dev_kfree_skb_any(skb);
2911           return 0;
2912         }
2913         if ((unsigned long)skb->data & 3) {
2914            printk("Misaligned SKB\n");
2915            if (vcc->pop)
2916                  vcc->pop(vcc, skb);
2917            else
2918                  dev_kfree_skb_any(skb);
2919            return 0;
2920         }       
2921     /* Get a descriptor number from our free descriptor queue.
2922        The descriptor number is taken from the TCQ, which doubles as a
2923        free-descriptor queue here: the TCQ starts out initialized with
2924        all the descriptors and is therefore full.
2925     */
2926     desc = get_desc (iadev, iavcc);
2927     if (desc == 0xffff) 
2928         return 1;
2929     comp_code = desc >> 13;  
2930     desc &= 0x1fff;  
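    /* A TCQ entry is 16 bits wide: the top three bits carry the completion
       code and the low 13 bits carry the descriptor number. */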
2931   
2932     if ((desc == 0) || (desc > iadev->num_tx_desc))  
2933     {  
2934         IF_ERR(printk(DEV_LABEL ": invalid desc for send: %d\n", desc);) 
2935                 atomic_inc(&vcc->stats->tx);
2936         if (vcc->pop)   
2937             vcc->pop(vcc, skb);   
2938         else  
2939             dev_kfree_skb_any(skb);
2940         return 0;   /* return SUCCESS */
2941     }  
2942   
2943     if (comp_code)  
2944     {  
2945         IF_ERR(printk(DEV_LABEL ": send desc:%d completion code %d error\n", 
2946                                                             desc, comp_code);)  
2947     }  
2948        
2949         /* remember the desc and vcc mapping */
2950         iavcc->vc_desc_cnt++;
2951         iadev->desc_tbl[desc-1].iavcc = iavcc;
2952         iadev->desc_tbl[desc-1].txskb = skb;
2953         IA_SKB_STATE(skb) = 0;
2954 
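        /* The TCQ is a ring of 16-bit entries: advance the read pointer by
           one entry, wrapping from the end back to the start, and write the
           new value back to the segmentation unit's TCQ read-pointer
           register. */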
2955         iadev->ffL.tcq_rd += 2;
2956         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2957         iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2958     writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2959   
2960     /* Put the descriptor number in the packet ready queue  
2961         and put the updated write pointer in the DLE field   
2962     */   
2963     *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2964 
2965     iadev->ffL.prq_wr += 2;
2966         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2967                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2968       
2969     /* Figure out the exact length of the packet and the padding required
2970        to align it on a 48-byte boundary. */
2971     total_len = skb->len + sizeof(struct cpcs_trailer);  
2972     total_len = ((total_len + 47) / 48) * 48;
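    /* e.g. a 100-byte skb plus the 8-byte CPCS trailer is 108 bytes, which
       rounds up to 144, i.e. three 48-byte cell payloads */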
2973     IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2974  
2975     /* Put the packet in a tx buffer */   
2976     trailer = iadev->tx_buf[desc-1].cpcs;
2977         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2978                   skb, skb->data, skb->len, desc);)
2979     trailer->control = 0; 
2980         /*big endian*/ 
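    /* same byte swap as the swap_byte_order() macro: the adapter wants the
       16-bit length in big-endian order */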
2981     trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2982     trailer->crc32 = 0; /* not needed - dummy bytes */  
2983 
2984     /* Display the packet */  
2985     IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2986                                                         skb->len, tcnter++);  
2987         xdump(skb->data, skb->len, "TX: ");
2988         printk("\n");)
2989 
2990     /* Build the buffer descriptor */  
2991     buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2992     buf_desc_ptr += desc;   /* points to the corresponding entry */  
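    /* desc is 1-based (desc == 0 was rejected above), so entry 0 of the
       descriptor table is never addressed here */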
2993     buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2994     /* Oddly, p.115 of the user's guide describes this as a read-only register */
2995         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2996     buf_desc_ptr->vc_index = vcc->vci;
2997     buf_desc_ptr->bytes = total_len;  
2998 
2999         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3000        clear_lockup (vcc, iadev);
3001 
3002     /* Build the DLE structure */  
3003     wr_ptr = iadev->tx_dle_q.write;  
3004     memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3005     wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3006                           skb->len, DMA_TO_DEVICE);
3007     wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3008                                                   buf_desc_ptr->buf_start_lo;  
3009     /* wr_ptr->bytes = swap_byte_order(total_len); byte-swapping here made no observable difference */
3010     wr_ptr->bytes = skb->len;  
3011 
3012         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
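        /* (bytes >> 2) == 0xb below matches byte counts 0x2c-0x2f; rounding
           the DMA length up to 0x30 keeps clear of the lockup values */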
3013         if ((wr_ptr->bytes >> 2) == 0xb)
3014            wr_ptr->bytes = 0x30;
3015 
3016     wr_ptr->mode = TX_DLE_PSI; 
3017     wr_ptr->prq_wr_ptr_data = 0;
3018   
3019     /* end is not to be used for the DLE q */  
3020     if (++wr_ptr == iadev->tx_dle_q.end)  
3021         wr_ptr = iadev->tx_dle_q.start;  
3022         
3023         /* Build trailer dle */
3024         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3025         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3026           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3027 
3028         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3029         wr_ptr->mode = DMA_INT_ENABLE; 
3030         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
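        /* Each packet thus occupies two DLEs, payload plus trailer; only this
           trailer DLE carries DMA_INT_ENABLE and the updated PRQ write
           pointer. */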
3031         
3032         /* end is not to be used for the DLE q */
3033         if (++wr_ptr == iadev->tx_dle_q.end)  
3034                 wr_ptr = iadev->tx_dle_q.start;
3035 
3036     iadev->tx_dle_q.write = wr_ptr;  
3037         ATM_DESC(skb) = vcc->vci;
3038         skb_queue_tail(&iadev->tx_dma_q, skb);
3039 
3040         atomic_inc(&vcc->stats->tx);
3041         iadev->tx_pkt_cnt++;
3042     /* Increment transaction counter */  
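    /* two DLEs (payload and trailer) were queued, hence a count of 2 */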
3043     writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3044         
3045 #if 0        
3046         /* add flow control logic */ 
3047         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3048           if (iavcc->vc_desc_cnt > 10) {
3049              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3050             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3051               iavcc->flow_inc = -1;
3052               iavcc->saved_tx_quota = vcc->tx_quota;
3053            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3054              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3055              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3056               iavcc->flow_inc = 0;
3057            }
3058         }
3059 #endif
3060     IF_TX(printk("ia send done\n");)  
3061     return 0;  
3062 }  
3063 
3064 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3065 {
3066         IADEV *iadev; 
3067         unsigned long flags;
3068 
3069         iadev = INPH_IA_DEV(vcc->dev);
3070         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3071         {
3072             if (!skb)
3073                 printk(KERN_CRIT "null skb in ia_send\n");
3074             else dev_kfree_skb_any(skb);
3075             return -EINVAL;
3076         }                         
3077         spin_lock_irqsave(&iadev->tx_lock, flags); 
3078         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3079             dev_kfree_skb_any(skb);
3080             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3081             return -EINVAL; 
3082         }
3083         ATM_SKB(skb)->vcc = vcc;
3084  
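        /* Keep transmit order FIFO: if a backlog already exists, queue behind
           it; otherwise try to hand the skb straight to the hardware and fall
           back to the backlog when ia_pkt_tx() cannot get a Tx descriptor. */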
3085         if (skb_peek(&iadev->tx_backlog)) {
3086            skb_queue_tail(&iadev->tx_backlog, skb);
3087         }
3088         else {
3089            if (ia_pkt_tx (vcc, skb)) {
3090               skb_queue_tail(&iadev->tx_backlog, skb);
3091            }
3092         }
3093         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3094         return 0;
3095 
3096 }
3097 
3098 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3099 { 
3100   int   left = *pos, n;   
3101   char  *tmpPtr;
3102   IADEV *iadev = INPH_IA_DEV(dev);
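  /* *pos selects the chunk to emit: 0 prints the board-type line, 1 prints
     the counter block, and anything beyond that returns 0 (end of output). */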
3103   if(!left--) {
3104      if (iadev->phy_type == FE_25MBIT_PHY) {
3105        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3106        return n;
3107      }
3108      if (iadev->phy_type == FE_DS3_PHY)
3109         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3110      else if (iadev->phy_type == FE_E3_PHY)
3111         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3112      else if (iadev->phy_type == FE_UTP_OPTION)
3113          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3114      else
3115         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3116      tmpPtr = page + n;
3117      if (iadev->pci_map_size == 0x40000)
3118         n += sprintf(tmpPtr, "-1KVC-");
3119      else
3120         n += sprintf(tmpPtr, "-4KVC-");  
3121      tmpPtr = page + n; 
3122      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3123         n += sprintf(tmpPtr, "1M  \n");
3124      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3125         n += sprintf(tmpPtr, "512K\n");
3126      else
3127        n += sprintf(tmpPtr, "128K\n");
3128      return n;
3129   }
3130   if (!left) {
3131      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3132                            "  Size of Tx Buffer  :  %u\n"
3133                            "  Number of Rx Buffer:  %u\n"
3134                            "  Size of Rx Buffer  :  %u\n"
3135                            "  Packets Received   :  %u\n"
3136                            "  Packets Transmitted:  %u\n"
3137                            "  Cells Received     :  %u\n"
3138                            "  Cells Transmitted  :  %u\n"
3139                            "  Board Dropped Cells:  %u\n"
3140                            "  Board Dropped Pkts :  %u\n",
3141                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3142                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3143                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3144                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3145                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3146   }
3147   return 0;
3148 }
3149   
3150 static const struct atmdev_ops ops = {  
3151     .open       = ia_open,  
3152     .close      = ia_close,  
3153     .ioctl      = ia_ioctl,  
3154     .send       = ia_send,  
3155     .phy_put    = ia_phy_put,  
3156     .phy_get    = ia_phy_get,  
3157     .change_qos = ia_change_qos,  
3158     .proc_read  = ia_proc_read,
3159     .owner      = THIS_MODULE,
3160 };  
3161       
3162 static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3163 {  
3164     struct atm_dev *dev;  
3165     IADEV *iadev;  
3166     int ret;
3167 
3168     iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3169     if (!iadev) {
3170         ret = -ENOMEM;
3171         goto err_out;
3172     }
3173 
3174     iadev->pci = pdev;
3175 
3176     IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3177         pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3178     if (pci_enable_device(pdev)) {
3179         ret = -ENODEV;
3180         goto err_out_free_iadev;
3181     }
3182     dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3183     if (!dev) {
3184         ret = -ENOMEM;
3185         goto err_out_disable_dev;
3186     }
3187     dev->dev_data = iadev;
3188     IF_INIT(printk(DEV_LABEL ": registered at (itf:%d)\n", dev->number);)
3189     IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3190         iadev->LineRate);)
3191 
3192     pci_set_drvdata(pdev, dev);
3193 
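    /* Record the new board in the module-level tables that back the
       board-index lookups in ia_ioctl(). */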
3194     ia_dev[iadev_count] = iadev;
3195     _ia_dev[iadev_count] = dev;
3196     iadev_count++;
3197     if (ia_init(dev) || ia_start(dev)) {  
3198         IF_INIT(printk("IA register failed!\n");)
3199         iadev_count--;
3200         ia_dev[iadev_count] = NULL;
3201         _ia_dev[iadev_count] = NULL;
3202         ret = -EINVAL;
3203         goto err_out_deregister_dev;
3204     }
3205     IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3206 
3207     iadev->next_board = ia_boards;  
3208     ia_boards = dev;  
3209 
3210     return 0;
3211 
3212 err_out_deregister_dev:
3213     atm_dev_deregister(dev);  
3214 err_out_disable_dev:
3215     pci_disable_device(pdev);
3216 err_out_free_iadev:
3217     kfree(iadev);
3218 err_out:
3219     return ret;
3220 }
3221 
3222 static void ia_remove_one(struct pci_dev *pdev)
3223 {
3224     struct atm_dev *dev = pci_get_drvdata(pdev);
3225     IADEV *iadev = INPH_IA_DEV(dev);
3226 
3227     /* Disable phy interrupts */
3228     ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3229                    SUNI_RSOP_CIE);
3230     udelay(1);
3231 
3232     if (dev->phy && dev->phy->stop)
3233         dev->phy->stop(dev);
3234 
3235     /* De-register device */  
3236         free_irq(iadev->irq, dev);
3237     iadev_count--;
3238     ia_dev[iadev_count] = NULL;
3239     _ia_dev[iadev_count] = NULL;
3240     IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3241     atm_dev_deregister(dev);
3242 
3243         iounmap(iadev->base);  
3244     pci_disable_device(pdev);
3245 
3246     ia_free_rx(iadev);
3247     ia_free_tx(iadev);
3248 
3249         kfree(iadev);
3250 }
3251 
3252 static const struct pci_device_id ia_pci_tbl[] = {
3253     { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3254     { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3255     { 0,}
3256 };
3257 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3258 
3259 static struct pci_driver ia_driver = {
3260     .name =         DEV_LABEL,
3261     .id_table =     ia_pci_tbl,
3262     .probe =        ia_init_one,
3263     .remove =       ia_remove_one,
3264 };
3265 
3266 static int __init ia_module_init(void)
3267 {
3268     int ret;
3269 
3270     ret = pci_register_driver(&ia_driver);
3271     if (ret >= 0) {
3272         ia_timer.expires = jiffies + 3*HZ;
3273         add_timer(&ia_timer); 
3274     } else
3275         printk(KERN_ERR DEV_LABEL ": failed to register PCI driver\n");
3276     return ret;
3277 }
3278 
3279 static void __exit ia_module_exit(void)
3280 {
3281     pci_unregister_driver(&ia_driver);
3282 
3283     del_timer_sync(&ia_timer);
3284 }
3285 
3286 module_init(ia_module_init);
3287 module_exit(ia_module_exit);