1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21 supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24 packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <asm/system.h>  
58 #include <asm/io.h>  
59 #include <asm/atomic.h>  
60 #include <asm/uaccess.h>  
61 #include <asm/string.h>  
62 #include <asm/byteorder.h>  
63 #include <linux/vmalloc.h>
64 #include <linux/jiffies.h>
65 #include "iphase.h"               
66 #include "suni.h"                 
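/* swap the two bytes of a 16-bit value (used below on the AAL5 trailer
   length field read out of a received buffer) */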
67 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  
68
69 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
70
71 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
72 static void desc_dbg(IADEV *iadev);
73
74 static IADEV *ia_dev[8];
75 static struct atm_dev *_ia_dev[8];
76 static int iadev_count;
77 static void ia_led_timer(unsigned long arg);
78 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
79 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
80 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
81 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
82             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
83
84 module_param(IA_TX_BUF, int, 0);
85 module_param(IA_TX_BUF_SZ, int, 0);
86 module_param(IA_RX_BUF, int, 0);
87 module_param(IA_RX_BUF_SZ, int, 0);
88 module_param(IADebugFlag, uint, 0644);
89
90 MODULE_LICENSE("GPL");
91
92 /**************************** IA_LIB **********************************/
93
94 static void ia_init_rtn_q (IARTN_Q *que) 
95 {
96    que->next = NULL; 
97    que->tail = NULL; 
98 }
99
100 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
101 {
102    data->next = NULL;
103    if (que->next == NULL) 
104       que->next = que->tail = data;
105    else {
106       data->next = que->next;
107       que->next = data;
108    } 
109    return;
110 }
111
112 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
113    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
114    if (!entry) return -1;
115    entry->data = data;
116    entry->next = NULL;
117    if (que->next == NULL) 
118       que->next = que->tail = entry;
119    else {
120       que->tail->next = entry;
121       que->tail = que->tail->next;
122    }      
123    return 1;
124 }
125
126 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
127    IARTN_Q *tmpdata;
128    if (que->next == NULL)
129       return NULL;
130    tmpdata = que->next;
131    if ( que->next == que->tail)  
132       que->next = que->tail = NULL;
133    else 
134       que->next = que->next->next;
135    return tmpdata;
136 }
137
138 static void ia_hack_tcq(IADEV *dev) {
139
140   u_short               desc1;
141   u_short               tcq_wr;
142   struct ia_vcc         *iavcc_r = NULL; 
143
144   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
145   while (dev->host_tcq_wr != tcq_wr) {
146      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
147      if (!desc1) ;
148      else if (!dev->desc_tbl[desc1 -1].timestamp) {
149         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
150         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
151      }                                 
152      else if (dev->desc_tbl[desc1 -1].timestamp) {
153         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
154            printk("IA: Fatal err in get_desc\n");
155            continue;
156         }
157         iavcc_r->vc_desc_cnt--;
158         dev->desc_tbl[desc1 -1].timestamp = 0;
159         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
160                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
161         if (iavcc_r->pcr < dev->rate_limit) {
162            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
163            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
164               printk("ia_hack_tcq: No memory available\n");
165         } 
166         dev->desc_tbl[desc1 -1].iavcc = NULL;
167         dev->desc_tbl[desc1 -1].txskb = NULL;
168      }
169      dev->host_tcq_wr += 2;
170      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
171         dev->host_tcq_wr = dev->ffL.tcq_st;
172   }
173 } /* ia_hack_tcq */
174
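/*
 * get_desc: fetch the next free transmit descriptor number from the
 * transmit complete queue (TCQ).  As a recovery step, descriptors whose
 * owning VC has been waiting longer than its ltimeout are reclaimed
 * first, so apparently-lost descriptors are returned to the pool.
 */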
175 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
176   u_short               desc_num, i;
177   struct sk_buff        *skb;
178   struct ia_vcc         *iavcc_r = NULL; 
179   unsigned long delta;
180   static unsigned long timer = 0;
181   int ltimeout;
182
183   ia_hack_tcq (dev);
184   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
185      timer = jiffies; 
186      i=0;
187      while (i < dev->num_tx_desc) {
188         if (!dev->desc_tbl[i].timestamp) {
189            i++;
190            continue;
191         }
192         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
193         delta = jiffies - dev->desc_tbl[i].timestamp;
194         if (delta >= ltimeout) {
195            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
196            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
197               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
198            else 
199               dev->ffL.tcq_rd -= 2;
200            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
201            if (!(skb = dev->desc_tbl[i].txskb) || 
202                           !(iavcc_r = dev->desc_tbl[i].iavcc))
203               printk("Fatal err, desc table vcc or skb is NULL\n");
204            else 
205               iavcc_r->vc_desc_cnt--;
206            dev->desc_tbl[i].timestamp = 0;
207            dev->desc_tbl[i].iavcc = NULL;
208            dev->desc_tbl[i].txskb = NULL;
209         }
210         i++;
211      } /* while */
212   }
213   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
214      return 0xFFFF;
215     
216   /* Get the next available descriptor number from TCQ */
217   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
218
219   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
220      dev->ffL.tcq_rd += 2;
221      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
222      dev->ffL.tcq_rd = dev->ffL.tcq_st;
223      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
224         return 0xFFFF; 
225      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
226   }
227
228   /* get system time */
229   dev->desc_tbl[desc_num -1].timestamp = jiffies;
230   return desc_num;
231 }
232
233 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
234   u_char                foundLockUp;
235   vcstatus_t            *vcstatus;
236   u_short               *shd_tbl;
237   u_short               tempCellSlot, tempFract;
238   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
239   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
240   u_int  i;
241
242   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
243      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
244      vcstatus->cnt++;
245      foundLockUp = 0;
246      if( vcstatus->cnt == 0x05 ) {
247         abr_vc += vcc->vci;
248         eabr_vc += vcc->vci;
249         if( eabr_vc->last_desc ) {
250            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
251               /* Wait for 10 microseconds */
252               udelay(10);
253               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
254                  foundLockUp = 1;
255            }
256            else {
257               tempCellSlot = abr_vc->last_cell_slot;
258               tempFract    = abr_vc->fraction;
259               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
260                          && (tempFract == dev->testTable[vcc->vci]->fract))
261                  foundLockUp = 1;                   
262               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
263               dev->testTable[vcc->vci]->fract = tempFract; 
264            }        
265         } /* last descriptor */            
266         vcstatus->cnt = 0;      
267      } /* vcstatus->cnt */
268         
269      if (foundLockUp) {
270         IF_ABR(printk("LOCK UP found\n");) 
271         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
272         /* Wait for 10 microseconds */
273         udelay(10); 
274         abr_vc->status &= 0xFFF8;
275         abr_vc->status |= 0x0001;  /* state is idle */
276         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
277         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
278         if (i < dev->num_vc)
279            shd_tbl[i] = vcc->vci;
280         else
281            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
282         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
283         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
284         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
285         vcstatus->cnt = 0;
286      } /* foundLockUp */
287
288   } /* if an ABR VC */
289
290
291 }
292  
293 /*
294 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
295 **
296 **  +----+----+------------------+-------------------------------+
297 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
298 **  +----+----+------------------+-------------------------------+
299 ** 
300 **    R = reserved (written as 0)
301 **    NZ = 0 if 0 cells/sec; 1 otherwise
302 **
303 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
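**
**    Worked example (derived from the conversion code below):
**    cr = 1000 cells/sec = 0x3E8; the leading 1 is at bit 9, so the
**    exponent is 9 and the mantissa is 1000 & 0x1ff = 0x1E8, giving
**    NZ | (9 << 9) | 0x1E8 = 0x53E8, i.e. 1.111101000b x 2^9 = 1000.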
304 */
305 static u16
306 cellrate_to_float(u32 cr)
307 {
308
309 #define NZ              0x4000
310 #define M_BITS          9               /* Number of bits in mantissa */
311 #define E_BITS          5               /* Number of bits in exponent */
312 #define M_MASK          0x1ff           
313 #define E_MASK          0x1f
314   u16   flot;
315   u32   tmp = cr & 0x00ffffff;
316   int   i   = 0;
317   if (cr == 0)
318      return 0;
319   while (tmp != 1) {
320      tmp >>= 1;
321      i++;
322   }
323   if (i == M_BITS)
324      flot = NZ | (i << M_BITS) | (cr & M_MASK);
325   else if (i < M_BITS)
326      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
327   else
328      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
329   return flot;
330 }
331
332 #if 0
333 /*
334 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
335 */
336 static u32
337 float_to_cellrate(u16 rate)
338 {
339   u32   exp, mantissa, cps;
340   if ((rate & NZ) == 0)
341      return 0;
342   exp = (rate >> M_BITS) & E_MASK;
343   mantissa = rate & M_MASK;
344   if (exp == 0)
345      return 1;
346   cps = (1 << M_BITS) | mantissa;
347   if (exp == M_BITS)
348      cps = cps;
349   else if (exp > M_BITS)
350      cps <<= (exp - M_BITS);
351   else
352      cps >>= (M_BITS - exp);
353   return cps;
354 }
355 #endif 
356
357 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
358   srv_p->class_type = ATM_ABR;
359   srv_p->pcr        = dev->LineRate;
360   srv_p->mcr        = 0;
361   srv_p->icr        = 0x055cb7;
362   srv_p->tbe        = 0xffffff;
363   srv_p->frtt       = 0x3a;
364   srv_p->rif        = 0xf;
365   srv_p->rdf        = 0xb;
366   srv_p->nrm        = 0x4;
367   srv_p->trm        = 0x7;
368   srv_p->cdf        = 0x3;
369   srv_p->adtf       = 50;
370 }
371
372 static int
373 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
374                                                 struct atm_vcc *vcc, u8 flag)
375 {
376   f_vc_abr_entry  *f_abr_vc;
377   r_vc_abr_entry  *r_abr_vc;
378   u32           icr;
379   u8            trm, nrm, crm;
380   u16           adtf, air, *ptr16;      
381   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
382   f_abr_vc += vcc->vci;       
383   switch (flag) {
384      case 1: /* FFRED initialization */
385 #if 0  /* sanity check */
386        if (srv_p->pcr == 0)
387           return INVALID_PCR;
388        if (srv_p->pcr > dev->LineRate)
389           srv_p->pcr = dev->LineRate;
390        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
391           return MCR_UNAVAILABLE;
392        if (srv_p->mcr > srv_p->pcr)
393           return INVALID_MCR;
394        if (!(srv_p->icr))
395           srv_p->icr = srv_p->pcr;
396        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
397           return INVALID_ICR;
398        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
399           return INVALID_TBE;
400        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
401           return INVALID_FRTT;
402        if (srv_p->nrm > MAX_NRM)
403           return INVALID_NRM;
404        if (srv_p->trm > MAX_TRM)
405           return INVALID_TRM;
406        if (srv_p->adtf > MAX_ADTF)
407           return INVALID_ADTF;
408        else if (srv_p->adtf == 0)
409           srv_p->adtf = 1;
410        if (srv_p->cdf > MAX_CDF)
411           return INVALID_CDF;
412        if (srv_p->rif > MAX_RIF)
413           return INVALID_RIF;
414        if (srv_p->rdf > MAX_RDF)
415           return INVALID_RDF;
416 #endif
417        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
418        f_abr_vc->f_vc_type = ABR;
419        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
420                                   /* i.e. 2**n = 2 << (n-1) */
421        f_abr_vc->f_nrm = nrm << 8 | nrm;
422        trm = 100000/(2 << (16 - srv_p->trm));
423        if ( trm == 0) trm = 1;
424        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
425        crm = srv_p->tbe / nrm;
426        if (crm == 0) crm = 1;
427        f_abr_vc->f_crm = crm & 0xff;
428        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
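       /* Cap ICR at roughly TBE/FRTT cells/sec, scaled by 1000000 (FRTT is
        * presumably kept in microseconds here); the division order is
        * chosen so the integer ratio does not truncate to zero. */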
429        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
430                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
431                                 (1000000/(srv_p->frtt/srv_p->tbe)));
432        f_abr_vc->f_icr = cellrate_to_float(icr);
433        adtf = (10000 * srv_p->adtf)/8192;
434        if (adtf == 0) adtf = 1; 
435        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
436        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
437        f_abr_vc->f_acr = f_abr_vc->f_icr;
438        f_abr_vc->f_status = 0x0042;
439        break;
440     case 0: /* RFRED initialization */  
441        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
442        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
443        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
444        r_abr_vc += vcc->vci;
445        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
446        air = srv_p->pcr << (15 - srv_p->rif);
447        if (air == 0) air = 1;
448        r_abr_vc->r_air = cellrate_to_float(air);
449        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
450        dev->sum_mcr        += srv_p->mcr;
451        dev->n_abr++;
452        break;
453     default:
454        break;
455   }
456   return        0;
457 }
458 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
459    u32 rateLow=0, rateHigh, rate;
460    int entries;
461    struct ia_vcc *ia_vcc;
462
463    int   idealSlot =0, testSlot, toBeAssigned, inc;
464    u32   spacing;
465    u16  *SchedTbl, *TstSchedTbl;
466    u16  cbrVC, vcIndex;
467    u32   fracSlot    = 0;
468    u32   sp_mod      = 0;
469    u32   sp_mod2     = 0;
470
471    /* IpAdjustTrafficParams */
472    if (vcc->qos.txtp.max_pcr <= 0) {
473       IF_ERR(printk("PCR for CBR not defined\n");)
474       return -1;
475    }
476    rate = vcc->qos.txtp.max_pcr;
477    entries = rate / dev->Granularity;
478    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
479                                 entries, rate, dev->Granularity);)
480    if (entries < 1)
481       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
482    rateLow  =  entries * dev->Granularity;
483    rateHigh = (entries + 1) * dev->Granularity;
484    if (3*(rate - rateLow) > (rateHigh - rate))
485       entries++;
486    if (entries > dev->CbrRemEntries) {
487       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
488       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
489                                        entries, dev->CbrRemEntries);)
490       return -EBUSY;
491    }   
492
493    ia_vcc = INPH_IA_VCC(vcc);
494    ia_vcc->NumCbrEntry = entries; 
495    dev->sum_mcr += entries * dev->Granularity; 
496    /* IaFFrednInsertCbrSched */
497    // Starting at an arbitrary location, place the entries into the table
498    // as smoothly as possible
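    // e.g. CbrTotEntries = 10 and entries = 4 gives spacing = 2, sp_mod = 2;
    // the remainder carried in sp_mod2/fracSlot spreads the entries with
    // gaps of 2, 2 and 3 slots (plus 3 back to the start), 10/4 on average.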
499    cbrVC   = 0;
500    spacing = dev->CbrTotEntries / entries;
501    sp_mod  = dev->CbrTotEntries % entries; // get modulo
502    toBeAssigned = entries;
503    fracSlot = 0;
504    vcIndex  = vcc->vci;
505    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
506    while (toBeAssigned)
507    {
508       // If this is the first time, start the table loading for this connection
509       // as close to entryPoint as possible.
510       if (toBeAssigned == entries)
511       {
512          idealSlot = dev->CbrEntryPt;
513          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
514          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
515             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
516       } else {
517          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
518          // in the table that would be  smoothest
519          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
520          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
521       }
522       if (idealSlot >= (int)dev->CbrTotEntries) 
523          idealSlot -= dev->CbrTotEntries;  
524       // Continuously check around this ideal value until a null
525       // location is encountered.
526       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
527       inc = 0;
528       testSlot = idealSlot;
529       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
530       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
531                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
532       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
533       while (cbrVC)  // If another VC at this location, we have to keep looking
534       {
535           inc++;
536           testSlot = idealSlot - inc;
537           if (testSlot < 0) { // Wrap if necessary
538              testSlot += dev->CbrTotEntries;
539              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
540                                                        (u32)SchedTbl,testSlot);)
541           }
542           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
543           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
544           if (!cbrVC)
545              break;
546           testSlot = idealSlot + inc;
547           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
548              testSlot -= dev->CbrTotEntries;
549              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
550              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
551                                             testSlot, toBeAssigned);)
552           } 
553           // set table index and read in value
554           TstSchedTbl = (u16*)(SchedTbl + testSlot);
555           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
556                           (u32)TstSchedTbl,cbrVC,inc);) 
557           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
558        } /* while */
559        // Move this VCI number into this location of the CBR Sched table.
560        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
561        dev->CbrRemEntries--;
562        toBeAssigned--;
563    } /* while */ 
564
565    /* IaFFrednCbrEnable */
566    dev->NumEnabledCBR++;
567    if (dev->NumEnabledCBR == 1) {
568        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
569        IF_CBR(printk("CBR is enabled\n");)
570    }
571    return 0;
572 }
573 static void ia_cbrVc_close (struct atm_vcc *vcc) {
574    IADEV *iadev;
575    u16 *SchedTbl, NullVci = 0;
576    u32 i, NumFound;
577
578    iadev = INPH_IA_DEV(vcc->dev);
579    iadev->NumEnabledCBR--;
580    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
581    if (iadev->NumEnabledCBR == 0) {
582       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
583       IF_CBR (printk("CBR support disabled\n");)
584    }
585    NumFound = 0;
586    for (i=0; i < iadev->CbrTotEntries; i++)
587    {
588       if (*SchedTbl == vcc->vci) {
589          iadev->CbrRemEntries++;
590          *SchedTbl = NullVci;
591          IF_CBR(NumFound++;)
592       }
593       SchedTbl++;   
594    } 
595    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
596 }
597
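/*
 * Number of free transmit descriptors: the count of 16-bit TCQ slots
 * (hence the division by 2) between the adapter's write pointer and our
 * read pointer, taking wrap-around into account.
 */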
598 static int ia_avail_descs(IADEV *iadev) {
599    int tmp = 0;
600    ia_hack_tcq(iadev);
601    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
602       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
603    else
604       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
605                    iadev->ffL.tcq_st) / 2;
606    return tmp;
607 }    
608
609 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
610
611 static int ia_que_tx (IADEV *iadev) { 
612    struct sk_buff *skb;
613    int num_desc;
614    struct atm_vcc *vcc;
615    struct ia_vcc *iavcc;
616    num_desc = ia_avail_descs(iadev);
617
618    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
619       if (!(vcc = ATM_SKB(skb)->vcc)) {
620          dev_kfree_skb_any(skb);
621          printk("ia_que_tx: Null vcc\n");
622          break;
623       }
624       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
625          dev_kfree_skb_any(skb);
626          printk("Free the SKB on closed vci %d \n", vcc->vci);
627          break;
628       }
629       iavcc = INPH_IA_VCC(vcc);
630       if (ia_pkt_tx (vcc, skb)) {
631          skb_queue_head(&iadev->tx_backlog, skb);
632       }
633       num_desc--;
634    }
635    return 0;
636 }
637
638 static void ia_tx_poll (IADEV *iadev) {
639    struct atm_vcc *vcc = NULL;
640    struct sk_buff *skb = NULL, *skb1 = NULL;
641    struct ia_vcc *iavcc;
642    IARTN_Q *  rtne;
643
644    ia_hack_tcq(iadev);
645    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
646        skb = rtne->data.txskb;
647        if (!skb) {
648            printk("ia_tx_poll: skb is null\n");
649            goto out;
650        }
651        vcc = ATM_SKB(skb)->vcc;
652        if (!vcc) {
653            printk("ia_tx_poll: vcc is null\n");
654            dev_kfree_skb_any(skb);
655            goto out;
656        }
657
658        iavcc = INPH_IA_VCC(vcc);
659        if (!iavcc) {
660            printk("ia_tx_poll: iavcc is null\n");
661            dev_kfree_skb_any(skb);
662            goto out;
663        }
664
665        skb1 = skb_dequeue(&iavcc->txing_skb);
666        while (skb1 && (skb1 != skb)) {
667           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
668              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
669           }
670           IF_ERR(printk("Releasing an unmatched SKB\n");)
671           if ((vcc->pop) && (skb1->len != 0))
672           {
673              vcc->pop(vcc, skb1);
674              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
675                                                           (long)skb1);)
676           }
677           else 
678              dev_kfree_skb_any(skb1);
679           skb1 = skb_dequeue(&iavcc->txing_skb);
680        }                                                        
681        if (!skb1) {
682           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
683           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
684           break;
685        }
686        if ((vcc->pop) && (skb->len != 0))
687        {
688           vcc->pop(vcc, skb);
689           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
690        }
691        else 
692           dev_kfree_skb_any(skb);
693        kfree(rtne);
694     }
695     ia_que_tx(iadev);
696 out:
697     return;
698 }
699 #if 0
700 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
701 {
702         u32     t;
703         int     i;
704         /*
705          * Issue a command to enable writes to the NOVRAM
706          */
707         NVRAM_CMD (EXTEND + EWEN);
708         NVRAM_CLR_CE;
709         /*
710          * issue the write command
711          */
712         NVRAM_CMD(IAWRITE + addr);
713         /* 
714          * Send the data, starting with D15, then D14, and so on for 16 bits
715          */
716         for (i=15; i>=0; i--) {
717                 NVRAM_CLKOUT (val & 0x8000);
718                 val <<= 1;
719         }
720         NVRAM_CLR_CE;
721         CFG_OR(NVCE);
722         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
723         while (!(t & NVDO))
724                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
725
726         NVRAM_CLR_CE;
727         /*
728          * disable writes again
729          */
730         NVRAM_CMD(EXTEND + EWDS);
731         NVRAM_CLR_CE;
732         CFG_AND(~NVDI);
733 }
734 #endif
735
736 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
737 {
738         u_short val;
739         u32     t;
740         int     i;
741         /*
742          * Read the first bit that was clocked with the falling edge of
743          * the last command data clock
744          */
745         NVRAM_CMD(IAREAD + addr);
746         /*
747          * Now read the rest of the bits, the next bit read is D14, then D13,
748          * and so on.
749          */
750         val = 0;
751         for (i=15; i>=0; i--) {
752                 NVRAM_CLKIN(t);
753                 val |= (t << i);
754         }
755         NVRAM_CLR_CE;
756         CFG_AND(~NVDI);
757         return val;
758 }
759
760 static void ia_hw_type(IADEV *iadev) {
761    u_short memType = ia_eeprom_get(iadev, 25);   
762    iadev->memType = memType;
763    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
764       iadev->num_tx_desc = IA_TX_BUF;
765       iadev->tx_buf_sz = IA_TX_BUF_SZ;
766       iadev->num_rx_desc = IA_RX_BUF;
767       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
768    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
769       if (IA_TX_BUF == DFL_TX_BUFFERS)
770         iadev->num_tx_desc = IA_TX_BUF / 2;
771       else 
772         iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       if (IA_RX_BUF == DFL_RX_BUFFERS)
775         iadev->num_rx_desc = IA_RX_BUF / 2;
776       else
777         iadev->num_rx_desc = IA_RX_BUF;
778       iadev->rx_buf_sz = IA_RX_BUF_SZ;
779    }
780    else {
781       if (IA_TX_BUF == DFL_TX_BUFFERS) 
782         iadev->num_tx_desc = IA_TX_BUF / 8;
783       else
784         iadev->num_tx_desc = IA_TX_BUF;
785       iadev->tx_buf_sz = IA_TX_BUF_SZ;
786       if (IA_RX_BUF == DFL_RX_BUFFERS)
787         iadev->num_rx_desc = IA_RX_BUF / 8;
788       else
789         iadev->num_rx_desc = IA_RX_BUF;
790       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
791    } 
792    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
793    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
794          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
795          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
796
797 #if 0
798    if ((memType & FE_MASK) == FE_SINGLE_MODE)
799       iadev->phy_type = PHY_OC3C_S;
800    else if ((memType & FE_MASK) == FE_UTP_OPTION)
801       iadev->phy_type = PHY_UTP155;
802    else
803       iadev->phy_type = PHY_OC3C_M;
804 #endif
805    
806    iadev->phy_type = memType & FE_MASK;
807    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
808                                          memType,iadev->phy_type);)
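   /* Derive the line rate in cells/sec from the PHY bit rate:
    * bits/sec -> bytes/sec (/8), scaled by 26/27 (the framing overhead
    * factor used by this driver), then divided by 53 bytes per cell. */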
809    if (iadev->phy_type == FE_25MBIT_PHY) 
810       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
811    else if (iadev->phy_type == FE_DS3_PHY)
812       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
813    else if (iadev->phy_type == FE_E3_PHY) 
814       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
815    else
816        iadev->LineRate = (u32)(ATM_OC3_PCR);
817    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
818
819 }
820
821 static void IaFrontEndIntr(IADEV *iadev) {
822   volatile IA_SUNI *suni;
823   volatile ia_mb25_t *mb25;
824   volatile suni_pm7345_t *suni_pm7345;
825   u32 intr_status;
826   u_int frmr_intr;
827
828   if(iadev->phy_type & FE_25MBIT_PHY) {
829      mb25 = (ia_mb25_t*)iadev->phy;
830      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
831   } else if (iadev->phy_type & FE_DS3_PHY) {
832      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
833      /* clear FRMR interrupts */
834      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
835      iadev->carrier_detect =  
836            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
837   } else if (iadev->phy_type & FE_E3_PHY ) {
838      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
839      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
840      iadev->carrier_detect =
841            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
842   }
843   else { 
844      suni = (IA_SUNI *)iadev->phy;
845      intr_status = suni->suni_rsop_status & 0xff;
846      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
847   }
848   if (iadev->carrier_detect)
849     printk("IA: SUNI carrier detected\n");
850   else
851     printk("IA: SUNI carrier lost signal\n"); 
852   return;
853 }
854
855 static void ia_mb25_init (IADEV *iadev)
856 {
857    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
858 #if 0
859    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
860 #endif
861    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
862    mb25->mb25_diag_control = 0;
863    /*
864     * Initialize carrier detect state
865     */
866    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
867    return;
868 }                   
869
870 static void ia_suni_pm7345_init (IADEV *iadev)
871 {
872    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
873    if (iadev->phy_type & FE_DS3_PHY)
874    {
875       iadev->carrier_detect = 
876           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
877       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
878       suni_pm7345->suni_ds3_frm_cfg = 1;
879       suni_pm7345->suni_ds3_tran_cfg = 1;
880       suni_pm7345->suni_config = 0;
881       suni_pm7345->suni_splr_cfg = 0;
882       suni_pm7345->suni_splt_cfg = 0;
883    }
884    else 
885    {
886       iadev->carrier_detect = 
887           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
888       suni_pm7345->suni_e3_frm_fram_options = 0x4;
889       suni_pm7345->suni_e3_frm_maint_options = 0x20;
890       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
891       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
892       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
893       suni_pm7345->suni_e3_tran_fram_options = 0x1;
894       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
895       suni_pm7345->suni_splr_cfg = 0x41;
896       suni_pm7345->suni_splt_cfg = 0x41;
897    } 
898    /*
899     * Enable RSOP loss of signal interrupt.
900     */
901    suni_pm7345->suni_intr_enbl = 0x28;
902  
903    /*
904     * Clear error counters
905     */
906    suni_pm7345->suni_id_reset = 0;
907
908    /*
909     * Clear "PMCTST" in master test register.
910     */
911    suni_pm7345->suni_master_test = 0;
912
913    suni_pm7345->suni_rxcp_ctrl = 0x2c;
914    suni_pm7345->suni_rxcp_fctrl = 0x81;
915  
916    suni_pm7345->suni_rxcp_idle_pat_h1 =
917         suni_pm7345->suni_rxcp_idle_pat_h2 =
918         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
919    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
920  
921    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
922    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
923    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
924    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
925  
926    suni_pm7345->suni_rxcp_cell_pat_h1 =
927         suni_pm7345->suni_rxcp_cell_pat_h2 =
928         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
929    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
930  
931    suni_pm7345->suni_rxcp_cell_mask_h1 =
932         suni_pm7345->suni_rxcp_cell_mask_h2 =
933         suni_pm7345->suni_rxcp_cell_mask_h3 =
934         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
935  
936    suni_pm7345->suni_txcp_ctrl = 0xa4;
937    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
938    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
939  
940    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
941                                  SUNI_PM7345_CLB |
942                                  SUNI_PM7345_DLB |
943                                   SUNI_PM7345_PLB);
944 #ifdef __SNMP__
945    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
946 #endif /* __SNMP__ */
947    return;
948 }
949
950
951 /***************************** IA_LIB END *****************************/
952     
953 #ifdef CONFIG_ATM_IA_DEBUG
954 static int tcnter = 0;
955 static void xdump( u_char*  cp, int  length, char*  prefix )
956 {
957     int col, count;
958     u_char prntBuf[120];
959     u_char*  pBuf = prntBuf;
960     count = 0;
961     while(count < length){
962         pBuf += sprintf( pBuf, "%s", prefix );
963         for(col = 0;count + col < length && col < 16; col++){
964             if (col != 0 && (col % 4) == 0)
965                 pBuf += sprintf( pBuf, " " );
966             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
967         }
968         while(col++ < 16){      /* pad end of buffer with blanks */
969             if ((col % 4) == 0)
970                 pBuf += sprintf( pBuf, " " );
971             pBuf += sprintf( pBuf, "   " );
972         }
973         pBuf += sprintf( pBuf, "  " );
974         for(col = 0;count + col < length && col < 16; col++){
975             if (isprint((int)cp[count + col]))
976                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
977             else
978                 pBuf += sprintf( pBuf, "." );
979         }
980         sprintf( pBuf, "\n" );
981         // SPrint(prntBuf);
982         printk("%s", prntBuf);
983         count += col;
984         pBuf = prntBuf;
985     }
986
987 }  /* close xdump(... */
988 #endif /* CONFIG_ATM_IA_DEBUG */
989
990   
991 static struct atm_dev *ia_boards = NULL;  
992   
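/* Scale the nominal control RAM base addresses by the amount of
   on-board memory, in units of 128K. */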
993 #define ACTUAL_RAM_BASE \
994         RAM_BASE*((iadev->mem)/(128 * 1024))  
995 #define ACTUAL_SEG_RAM_BASE \
996         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
997 #define ACTUAL_REASS_RAM_BASE \
998         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
999   
1000   
1001 /*-- some utilities and memory allocation stuff will come here -------------*/  
1002   
1003 static void desc_dbg(IADEV *iadev) {
1004
1005   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1006   u32 i;
1007   void __iomem *tmp;
1008   // regval = readl((u32)ia_cmds->maddr);
1009   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1010   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1011                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1012                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1013   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1014                    iadev->ffL.tcq_rd);
1015   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1016   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1017   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1018   i = 0;
1019   while (tcq_st_ptr != tcq_ed_ptr) {
1020       tmp = iadev->seg_ram+tcq_st_ptr;
1021       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1022       tcq_st_ptr += 2;
1023   }
1024   for(i=0; i <iadev->num_tx_desc; i++)
1025       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1026 }
1027   
1028   
1029 /*----------------------------- Receiving side stuff --------------------------*/  
1030  
1031 static void rx_excp_rcvd(struct atm_dev *dev)  
1032 {  
1033 #if 0 /* closing the receiving side will cause too many excp int */  
1034   IADEV *iadev;  
1035   u_short state;  
1036   u_short excpq_rd_ptr;  
1037   //u_short *ptr;  
1038   int vci, error = 1;  
1039   iadev = INPH_IA_DEV(dev);  
1040   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1041   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1042   { printk("state = %x \n", state); 
1043         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1044  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1045         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1046             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1047         // TODO: update exception stat
1048         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1049         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1050         // pwang_test
1051         excpq_rd_ptr += 4;  
1052         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1053             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1054         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1055         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1056   }  
1057 #endif
1058 }  
1059   
1060 static void free_desc(struct atm_dev *dev, int desc)  
1061 {  
1062         IADEV *iadev;  
1063         iadev = INPH_IA_DEV(dev);  
1064         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1065         iadev->rfL.fdq_wr +=2;
1066         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1067                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1068         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1069 }  
1070   
1071   
1072 static int rx_pkt(struct atm_dev *dev)  
1073 {  
1074         IADEV *iadev;  
1075         struct atm_vcc *vcc;  
1076         unsigned short status;  
1077         struct rx_buf_desc __iomem *buf_desc_ptr;  
1078         int desc;   
1079         struct dle* wr_ptr;  
1080         int len;  
1081         struct sk_buff *skb;  
1082         u_int buf_addr, dma_addr;  
1083
1084         iadev = INPH_IA_DEV(dev);  
1085         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1086         {  
1087             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1088             return -EINVAL;  
1089         }  
1090         /* mask 1st 3 bits to get the actual descno. */  
1091         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1092         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1093                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1094               printk(" pcq_wr_ptr = 0x%x\n",
1095                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1096         /* update the read pointer - maybe we should do this at the end */  
1097         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1098                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1099         else  
1100                 iadev->rfL.pcq_rd += 2;
1101         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1102   
1103         /* get the buffer desc entry.  
1104                 update stuff - there doesn't seem to be any update necessary  
1105         */  
1106         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1107         /* make the ptr point to the corresponding buffer desc entry */  
1108         buf_desc_ptr += desc;     
1109         if (!desc || (desc > iadev->num_rx_desc) || 
1110                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1111             free_desc(dev, desc);
1112             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1113             return -1;
1114         }
1115         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1116         if (!vcc)  
1117         {      
1118                 free_desc(dev, desc); 
1119                 printk("IA: null vcc, drop PDU\n");  
1120                 return -1;  
1121         }  
1122           
1123   
1124         /* might want to check the status bits for errors */  
1125         status = (u_short) (buf_desc_ptr->desc_mode);  
1126         if (status & (RX_CER | RX_PTE | RX_OFL))  
1127         {  
1128                 atomic_inc(&vcc->stats->rx_err);
1129                 IF_ERR(printk("IA: bad packet, dropping it");)  
1130                 if (status & RX_CER) { 
1131                     IF_ERR(printk(" cause: packet CRC error\n");)
1132                 }
1133                 else if (status & RX_PTE) {
1134                     IF_ERR(printk(" cause: packet time out\n");)
1135                 }
1136                 else {
1137                     IF_ERR(printk(" cause: buffer overflow\n");)
1138                 }
1139                 goto out_free_desc;
1140         }  
1141   
1142         /*  
1143                 build DLE.        
1144         */  
1145   
1146         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1147         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1148         len = dma_addr - buf_addr;  
1149         if (len > iadev->rx_buf_sz) {
1150            printk("SDU over %d bytes received, dropped!!!\n", iadev->rx_buf_sz);
1151            atomic_inc(&vcc->stats->rx_err);
1152            goto out_free_desc;
1153         }
1154                   
1155         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1156            if (vcc->vci < 32)
1157               printk("Drop control packets\n");
1158            goto out_free_desc;
1159         }
1160         skb_put(skb,len);  
1161         // pwang_test
1162         ATM_SKB(skb)->vcc = vcc;
1163         ATM_DESC(skb) = desc;        
1164         skb_queue_tail(&iadev->rx_dma_q, skb);  
1165
1166         /* Build the DLE structure */  
1167         wr_ptr = iadev->rx_dle_q.write;  
1168         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1169                 len, PCI_DMA_FROMDEVICE);
1170         wr_ptr->local_pkt_addr = buf_addr;  
1171         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1172         wr_ptr->mode = DMA_INT_ENABLE;  
1173   
1174         /* should take care of wrap-around here too. */  
1175         if(++wr_ptr == iadev->rx_dle_q.end)
1176              wr_ptr = iadev->rx_dle_q.start;
1177         iadev->rx_dle_q.write = wr_ptr;  
1178         udelay(1);  
1179         /* Increment transaction counter */  
1180         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1181 out:    return 0;  
1182 out_free_desc:
1183         free_desc(dev, desc);
1184         goto out;
1185 }  
1186   
1187 static void rx_intr(struct atm_dev *dev)  
1188 {  
1189   IADEV *iadev;  
1190   u_short status;  
1191   u_short state, i;  
1192   
1193   iadev = INPH_IA_DEV(dev);  
1194   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1195   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1196   if (status & RX_PKT_RCVD)  
1197   {  
1198         /* do something */  
1199         /* Basically we received an interrupt for receiving a packet.  
1200         A descriptor would have been written to the packet complete   
1201         queue. Get all the descriptors and set up DMA to move the   
1202         packets until the packet complete queue is empty.  
1203         */  
1204         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1205         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1206         while(!(state & PCQ_EMPTY))  
1207         {  
1208              rx_pkt(dev);  
1209              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1210         }  
1211         iadev->rxing = 1;
1212   }  
1213   if (status & RX_FREEQ_EMPT)  
1214   {   
1215      if (iadev->rxing) {
1216         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1217         iadev->rx_tmp_jif = jiffies; 
1218         iadev->rxing = 0;
1219      } 
1220      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1221                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1222         for (i = 1; i <= iadev->num_rx_desc; i++)
1223                free_desc(dev, i);
1224 printk("Test logic RUN!!!!\n");
1225         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1226         iadev->rxing = 1;
1227      }
1228      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1229   }  
1230
1231   if (status & RX_EXCP_RCVD)  
1232   {  
1233         /* probably need to handle the exception queue also. */  
1234         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1235         rx_excp_rcvd(dev);  
1236   }  
1237
1238
1239   if (status & RX_RAW_RCVD)  
1240   {  
1241         /* need to handle the raw incoming cells. This depends on   
1242         whether we have programmed the adapter to receive raw cells or not;  
1243         else ignore. */  
1244         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1245   }  
1246 }  
1247   
1248   
1249 static void rx_dle_intr(struct atm_dev *dev)  
1250 {  
1251   IADEV *iadev;  
1252   struct atm_vcc *vcc;   
1253   struct sk_buff *skb;  
1254   int desc;  
1255   u_short state;   
1256   struct dle *dle, *cur_dle;  
1257   u_int dle_lp;  
1258   int len;
1259   iadev = INPH_IA_DEV(dev);  
1260  
1261   /* free all the DLEs done, that is, just update our own DLE read pointer   
1262         - do we really need to do this? Think not. */  
1263   /* DMA is done, just get all the receive buffers from the rx dma queue  
1264         and push them up to the higher layer protocol. Also free the desc  
1265         associated with the buffer. */  
1266   dle = iadev->rx_dle_q.read;  
1267   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1268   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1269   while(dle != cur_dle)  
1270   {  
1271       /* free the DMAed skb */  
1272       skb = skb_dequeue(&iadev->rx_dma_q);  
1273       if (!skb)  
1274          goto INCR_DLE;
1275       desc = ATM_DESC(skb);
1276       free_desc(dev, desc);  
1277                
1278       if (!(len = skb->len))
1279       {  
1280           printk("rx_dle_intr: skb len 0\n");  
1281           dev_kfree_skb_any(skb);  
1282       }  
1283       else  
1284       {  
1285           struct cpcs_trailer *trailer;
1286           u_short length;
1287           struct ia_vcc *ia_vcc;
1288
1289           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1290                 len, PCI_DMA_FROMDEVICE);
1291           /* no VCC related housekeeping done as yet. let's see */  
1292           vcc = ATM_SKB(skb)->vcc;
1293           if (!vcc) {
1294               printk("IA: null vcc\n");  
1295               dev_kfree_skb_any(skb);
1296               goto INCR_DLE;
1297           }
1298           ia_vcc = INPH_IA_VCC(vcc);
1299           if (ia_vcc == NULL)
1300           {
1301              atomic_inc(&vcc->stats->rx_err);
1302              dev_kfree_skb_any(skb);
1303              atm_return(vcc, atm_guess_pdu2truesize(len));
1304              goto INCR_DLE;
1305            }
1306           // get real pkt length  pwang_test
1307           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1308                                  skb->len - sizeof(*trailer));
1309           length =  swap(trailer->length);
1310           if ((length > iadev->rx_buf_sz) || (length > 
1311                               (skb->len - sizeof(struct cpcs_trailer))))
1312           {
1313              atomic_inc(&vcc->stats->rx_err);
1314              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1315                                                             length, skb->len);)
1316              dev_kfree_skb_any(skb);
1317              atm_return(vcc, atm_guess_pdu2truesize(len));
1318              goto INCR_DLE;
1319           }
1320           skb_trim(skb, length);
1321           
1322           /* Display the packet */  
1323           IF_RXPKT(printk("\nDMAed received data: len = %d \n", skb->len);  
1324           xdump(skb->data, skb->len, "RX: ");
1325           printk("\n");)
1326
1327           IF_RX(printk("rx_dle_intr: skb push");)  
1328           vcc->push(vcc,skb);  
1329           atomic_inc(&vcc->stats->rx);
1330           iadev->rx_pkt_cnt++;
1331       }  
1332 INCR_DLE:
1333       if (++dle == iadev->rx_dle_q.end)  
1334           dle = iadev->rx_dle_q.start;  
1335   }  
1336   iadev->rx_dle_q.read = dle;  
1337   
1338   /* if the interrupts are masked because there were no free desc available,  
1339                 unmask them now. */ 
1340   if (!iadev->rxing) {
1341      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1342      if (!(state & FREEQ_EMPTY)) {
1343         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1344         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1345                                       iadev->reass_reg+REASS_MASK_REG);
1346         iadev->rxing++; 
1347      }
1348   }
1349 }  
1350   
1351   
1352 static int open_rx(struct atm_vcc *vcc)  
1353 {  
1354         IADEV *iadev;  
1355         u_short __iomem *vc_table;  
1356         u_short __iomem *reass_ptr;  
1357         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1358
1359         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1360         iadev = INPH_IA_DEV(vcc->dev);  
1361         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1362            if (iadev->phy_type & FE_25MBIT_PHY) {
1363                printk("IA:  ABR not supported\n");
1364                return -EINVAL; 
1365            }
1366         }
1367         /* Make only this VCI in the vc table valid and let all   
1368                 others be invalid entries */  
1369         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1370         vc_table += vcc->vci;
1371         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1372
1373         *vc_table = vcc->vci << 6;
1374         /* Also keep a list of open rx vcs so that we can attach them with  
1375                 incoming PDUs later. */  
1376         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1377                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1378         {  
1379                 srv_cls_param_t srv_p;
1380                 init_abr_vc(iadev, &srv_p);
1381                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1382         } 
1383         else {  /* for UBR  later may need to add CBR logic */
1384                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1385                 reass_ptr += vcc->vci;
1386                 *reass_ptr = NO_AAL5_PKT;
1387         }
1388         
1389         if (iadev->rx_open[vcc->vci])  
1390                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1391                         vcc->dev->number, vcc->vci);  
1392         iadev->rx_open[vcc->vci] = vcc;  
1393         return 0;  
1394 }  
1395   
1396 static int rx_init(struct atm_dev *dev)  
1397 {  
1398         IADEV *iadev;  
1399         struct rx_buf_desc __iomem *buf_desc_ptr;  
1400         unsigned long rx_pkt_start = 0;  
1401         void *dle_addr;  
1402         struct abr_vc_table  *abr_vc_table; 
1403         u16 *vc_table;  
1404         u16 *reass_table;  
1405         int i,j, vcsize_sel;  
1406         u_short freeq_st_adr;  
1407         u_short *freeq_start;  
1408   
1409         iadev = INPH_IA_DEV(dev);  
1410   //    spin_lock_init(&iadev->rx_lock); 
1411   
1412         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1413         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1414                                         &iadev->rx_dle_dma);  
1415         if (!dle_addr)  {  
1416                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1417                 goto err_out;
1418         }
1419         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1420         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1421         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1422         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1423         /* the end of the dle q points to the entry after the last  
1424         DLE that can be used. */  
1425   
1426         /* write the upper 20 bits of the start address to rx list address register */  
1427         /* We know this is 32bit bus addressed so the following is safe */
1428         writel(iadev->rx_dle_dma & 0xfffff000,
1429                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1430         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1431                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1432                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1433         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1434                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1435                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1436   
1437         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1438         writew(0, iadev->reass_reg+MODE_REG);  
1439         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1440   
1441         /* Receive side control memory map  
1442            -------------------------------  
1443   
1444                 Buffer descr    0x0000 (736 - 23K)  
1445                 VP Table        0x5c00 (256 - 512)  
1446                 Except q        0x5e00 (128 - 512)  
1447                 Free buffer q   0x6000 (1K - 2K)  
1448                 Packet comp q   0x6800 (1K - 2K)  
1449                 Reass Table     0x7000 (1K - 2K)  
1450                 VC Table        0x7800 (1K - 2K)  
1451                 ABR VC Table    0x8000 (1K - 32K)  
1452         */  
1453           
1454         /* Base address for Buffer Descriptor Table */  
1455         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1456         /* Set the buffer size register */  
1457         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1458   
1459         /* Initialize each entry in the Buffer Descriptor Table */  
1460         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1461         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1462         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1463         buf_desc_ptr++;  
1464         rx_pkt_start = iadev->rx_pkt_ram;  
1465         for(i=1; i<=iadev->num_rx_desc; i++)  
1466         {  
1467                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1468                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1469                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1470                 buf_desc_ptr++;           
1471                 rx_pkt_start += iadev->rx_buf_sz;  
1472         }  
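        /* Descriptor 0 is left zeroed; descriptors 1..num_rx_desc each point at
           an rx_buf_sz-byte slice of packet RAM, with the 32-bit buffer start
           address split across buf_start_hi/buf_start_lo above. */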
1473         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1474         i = FREE_BUF_DESC_Q*iadev->memSize; 
1475         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1476         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1477         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1478                                          iadev->reass_reg+FREEQ_ED_ADR);
1479         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1480         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1481                                         iadev->reass_reg+FREEQ_WR_PTR);    
1482         /* Fill the FREEQ with all the free descriptors. */  
1483         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1484         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1485         for(i=1; i<=iadev->num_rx_desc; i++)  
1486         {  
1487                 *freeq_start = (u_short)i;  
1488                 freeq_start++;  
1489         }  
1490         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1491         /* Packet Complete Queue */
1492         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1493         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1494         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1495         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1496         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1497
1498         /* Exception Queue */
1499         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1500         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1501         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1502                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1503         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1504         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1505  
1506         /* Load local copy of FREEQ and PCQ ptrs */
1507         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1508         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1509         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1510         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1511         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1512         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1513         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1514         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1515         
1516         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1517               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1518               iadev->rfL.pcq_wr);)                
1519         /* just for check - no VP TBL */  
1520         /* VP Table */  
1521         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1522         /* initialize VP Table for invalid VPIs  
1523                 - I guess we can write all 1s or 0x000f in the entire memory  
1524                   space or something similar.  
1525         */  
1526   
1527         /* This seems to work and looks right to me too !!! */  
1528         i =  REASS_TABLE * iadev->memSize;
1529         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1530         /* initialize every Reassembly table entry to NO_AAL5_PKT */  
1531         reass_table = (u16 *)(iadev->reass_ram+i);  
1532         j = REASS_TABLE_SZ * iadev->memSize;
1533         for(i=0; i < j; i++)  
1534                 *reass_table++ = NO_AAL5_PKT;  
1535        i = 8*1024;
1536        vcsize_sel =  0;
1537        while (i != iadev->num_vc) {
1538           i /= 2;
1539           vcsize_sel++;
1540        }
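       /* vcsize_sel encodes log2(8K / num_vc): e.g. num_vc = 4096 gives 1 and
          num_vc = 1024 gives 3.  This assumes num_vc is a power of two no larger
          than 8K; otherwise the loop above would never terminate. */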
1541        i = RX_VC_TABLE * iadev->memSize;
1542        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1543        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1544         j = RX_VC_TABLE_SZ * iadev->memSize;
1545         for(i = 0; i < j; i++)  
1546         {  
1547                 /* shift the reassembly pointer by 3 plus the lower 3 bits of   
1548                 the vc_lkup_base register (= 3 for 1K VCs); the last byte   
1549                 holds those low 3 bits.   
1550                 To be programmed later.  
1551                 */  
1552                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1553                 vc_table++;  
1554         }  
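        /* Each entry appears to hold a reassembly pointer in the upper bits
           (index << 6) with the low bits set to 15 to mark the VCI invalid;
           open_rx() rewrites the entry for VCIs that are actually opened. */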
1555         /* ABR VC table */
1556         i =  ABR_VC_TABLE * iadev->memSize;
1557         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1558                    
1559         i = ABR_VC_TABLE * iadev->memSize;
1560         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1561         j = REASS_TABLE_SZ * iadev->memSize;
1562         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1563         for(i = 0; i < j; i++) {                
1564                 abr_vc_table->rdf = 0x0003;
1565                 abr_vc_table->air = 0x5eb1;
1566                 abr_vc_table++;         
1567         }  
1568
1569         /* Initialize other registers */  
1570   
1571         /* VP Filter Register set for VC Reassembly only */  
1572         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1573         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1574         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1575
1576         /* Packet Timeout Count  related Registers : 
1577            Set packet timeout to occur in about 3 seconds
1578            Set Packet Aging Interval count register to overflow in about 4 us
1579         */  
1580         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1581
1582         i = (j >> 6) & 0xFF;
1583         j += 2 * (j - 1);
1584         i |= ((j << 2) & 0xFF00);
1585         writew(i, iadev->reass_reg+TMOUT_RANGE);
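        /* TMOUT_RANGE is packed from two fields derived from j (the entry count
           used for the ABR VC table loop above): bits 0..7 hold (j >> 6) and
           bits 8..15 come from bits 8..15 of ((3*j - 2) << 2).  The encoding is
           hardware specific; the values are presumably chosen to hit the ~3 s
           timeout and ~4 us aging interval mentioned above. */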
1586
1587         /* initialize the desc_tbl */
1588         for(i=0; i<iadev->num_tx_desc;i++)
1589             iadev->desc_tbl[i].timestamp = 0;
1590
1591         /* to clear the interrupt status register - read it */  
1592         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1593   
1594         /* Mask Register - clear it */  
1595         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1596   
1597         skb_queue_head_init(&iadev->rx_dma_q);  
1598         iadev->rx_free_desc_qhead = NULL;   
1599
1600         iadev->rx_open = kzalloc(sizeof(struct atm_vcc *) * iadev->num_vc, GFP_KERNEL);
1601         if (!iadev->rx_open) {
1602                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1603                 dev->number);  
1604                 goto err_free_dle;
1605         }  
1606
1607         iadev->rxing = 1;
1608         iadev->rx_pkt_cnt = 0;
1609         /* Mode Register */  
1610         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1611         return 0;  
1612
1613 err_free_dle:
1614         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1615                             iadev->rx_dle_dma);  
1616 err_out:
1617         return -ENOMEM;
1618 }  
1619   
1620
1621 /*  
1622         The memory map suggested in appendix A and the coding for it.   
1623         Keeping it around just in case we change our mind later.  
1624   
1625                 Buffer descr    0x0000 (128 - 4K)  
1626                 UBR sched       0x1000 (1K - 4K)  
1627                 UBR Wait q      0x2000 (1K - 4K)  
1628                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1629                                         (128 - 256) each  
1630                 extended VC     0x4000 (1K - 8K)  
1631                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1632                 CBR sched       0x7000 (as needed)  
1633                 VC table        0x8000 (1K - 32K)  
1634 */  
1635   
1636 static void tx_intr(struct atm_dev *dev)  
1637 {  
1638         IADEV *iadev;  
1639         unsigned short status;  
1640         unsigned long flags;
1641
1642         iadev = INPH_IA_DEV(dev);  
1643   
1644         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1645         if (status & TRANSMIT_DONE){
1646
1647            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1648            spin_lock_irqsave(&iadev->tx_lock, flags);
1649            ia_tx_poll(iadev);
1650            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1651            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1652            if (iadev->close_pending)  
1653                wake_up(&iadev->close_wait);
1654         }         
1655         if (status & TCQ_NOT_EMPTY)  
1656         {  
1657             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1658         }  
1659 }  
1660   
1661 static void tx_dle_intr(struct atm_dev *dev)
1662 {
1663         IADEV *iadev;
1664         struct dle *dle, *cur_dle; 
1665         struct sk_buff *skb;
1666         struct atm_vcc *vcc;
1667         struct ia_vcc  *iavcc;
1668         u_int dle_lp;
1669         unsigned long flags;
1670
1671         iadev = INPH_IA_DEV(dev);
1672         spin_lock_irqsave(&iadev->tx_lock, flags);   
1673         dle = iadev->tx_dle_q.read;
1674         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1675                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1676         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
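            /* dle_lp is the byte offset of the DLE the hardware is currently at,
               taken modulo the ring size; shifting right by 4 converts it to an
               entry index, which assumes sizeof(struct dle) == 16. */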
1677         while (dle != cur_dle)
1678         {
1679             /* free the DMAed skb */ 
1680             skb = skb_dequeue(&iadev->tx_dma_q); 
1681             if (!skb) break;
1682
1683             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
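            /* Each packet is queued as a pair of DLEs (data + CPCS trailer), and
               only the first DLE of the pair carries the skb's DMA mapping, so
               the unmap below is meant to fire only for that first entry. */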
1684             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1685                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1686                                  PCI_DMA_TODEVICE);
1687             }
1688             vcc = ATM_SKB(skb)->vcc;
1689             if (!vcc) {
1690                   printk("tx_dle_intr: vcc is null\n");
1691                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1692                   dev_kfree_skb_any(skb);
1693
1694                   return;
1695             }
1696             iavcc = INPH_IA_VCC(vcc);
1697             if (!iavcc) {
1698                   printk("tx_dle_intr: iavcc is null\n");
1699                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1700                   dev_kfree_skb_any(skb);
1701                   return;
1702             }
1703             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1704                if ((vcc->pop) && (skb->len != 0))
1705                {     
1706                  vcc->pop(vcc, skb);
1707                } 
1708                else {
1709                  dev_kfree_skb_any(skb);
1710                }
1711             }
1712             else { /* Hold the rate-limited skb for flow control */
1713                IA_SKB_STATE(skb) |= IA_DLED;
1714                skb_queue_tail(&iavcc->txing_skb, skb);
1715             }
1716             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1717             if (++dle == iadev->tx_dle_q.end)
1718                  dle = iadev->tx_dle_q.start;
1719         }
1720         iadev->tx_dle_q.read = dle;
1721         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1722 }
1723   
1724 static int open_tx(struct atm_vcc *vcc)  
1725 {  
1726         struct ia_vcc *ia_vcc;  
1727         IADEV *iadev;  
1728         struct main_vc *vc;  
1729         struct ext_vc *evc;  
1730         int ret;
1731         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1732         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1733         iadev = INPH_IA_DEV(vcc->dev);  
1734         
1735         if (iadev->phy_type & FE_25MBIT_PHY) {
1736            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1737                printk("IA:  ABR not supported\n");
1738                return -EINVAL; 
1739            }
1740           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1741                printk("IA:  CBR not supported\n");
1742                return -EINVAL; 
1743           }
1744         }
1745         ia_vcc =  INPH_IA_VCC(vcc);
1746         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1747         if (vcc->qos.txtp.max_sdu > 
1748                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1749            printk("IA:  SDU size (%d) exceeds the configured SDU size %d\n",
1750                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1751            vcc->dev_data = NULL;
1752            kfree(ia_vcc);
1753            return -EINVAL; 
1754         }
1755         ia_vcc->vc_desc_cnt = 0;
1756         ia_vcc->txing = 1;
1757
1758         /* find pcr */
1759         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1760            vcc->qos.txtp.pcr = iadev->LineRate;
1761         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1762            vcc->qos.txtp.pcr = iadev->LineRate;
1763         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1764            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1765         if (vcc->qos.txtp.pcr > iadev->LineRate)
1766              vcc->qos.txtp.pcr = iadev->LineRate;
1767         ia_vcc->pcr = vcc->qos.txtp.pcr;
1768
1769         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1770         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1771         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1772         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
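        /* ltimeout is a per-VC drain timeout that shrinks as the PCR grows:
           fast VCs get HZ/10 jiffies, very slow ones up to 16*HZ, and the
           2700*HZ/pcr case gives roughly one second at a PCR of 2700 cells/s. */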
1773         if (ia_vcc->pcr < iadev->rate_limit)
1774            skb_queue_head_init (&ia_vcc->txing_skb);
1775         if (ia_vcc->pcr < iadev->rate_limit) {
1776            struct sock *sk = sk_atm(vcc);
1777
1778            if (vcc->qos.txtp.max_sdu != 0) {
1779                if (ia_vcc->pcr > 60000)
1780                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1781                else if (ia_vcc->pcr > 2000)
1782                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1783                else
1784                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1785            }
1786            else
1787              sk->sk_sndbuf = 24576;
1788         }
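        /* For rate-limited VCs the socket send buffer is capped at a small
           multiple of max_sdu (or 24576 bytes if no max_sdu is given),
           presumably to keep the per-VC flow-control backlog bounded. */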
1789            
1790         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1791         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1792         vc += vcc->vci;  
1793         evc += vcc->vci;  
1794         memset((caddr_t)vc, 0, sizeof(*vc));  
1795         memset((caddr_t)evc, 0, sizeof(*evc));  
1796           
1797         /* store the most significant 4 bits of vci as the last 4 bits   
1798                 of first part of atm header.  
1799            store the last 12 bits of vci as first 12 bits of the second  
1800                 part of the atm header.  
1801         */  
1802         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1803         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
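        /* Example: for vci 0x1234, atm_hdr1 = 0x0001 and atm_hdr2 = 0x2340. */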
1804  
1805         /* check the following for different traffic classes */  
1806         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1807         {  
1808                 vc->type = UBR;  
1809                 vc->status = CRC_APPEND;
1810                 vc->acr = cellrate_to_float(iadev->LineRate);  
1811                 if (vcc->qos.txtp.pcr > 0) 
1812                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1813                 IF_UBR(printk("UBR: txtp.max_pcr = 0x%x f_rate = 0x%x\n", 
1814                                              vcc->qos.txtp.max_pcr,vc->acr);)
1815         }  
1816         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1817         {       srv_cls_param_t srv_p;
1818                 IF_ABR(printk("Tx ABR VCC\n");)  
1819                 init_abr_vc(iadev, &srv_p);
1820                 if (vcc->qos.txtp.pcr > 0) 
1821                    srv_p.pcr = vcc->qos.txtp.pcr;
1822                 if (vcc->qos.txtp.min_pcr > 0) {
1823                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1824                    if (tmpsum > iadev->LineRate)
1825                        return -EBUSY;
1826                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1827                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1828                 } 
1829                 else srv_p.mcr = 0;
1830                 if (vcc->qos.txtp.icr)
1831                    srv_p.icr = vcc->qos.txtp.icr;
1832                 if (vcc->qos.txtp.tbe)
1833                    srv_p.tbe = vcc->qos.txtp.tbe;
1834                 if (vcc->qos.txtp.frtt)
1835                    srv_p.frtt = vcc->qos.txtp.frtt;
1836                 if (vcc->qos.txtp.rif)
1837                    srv_p.rif = vcc->qos.txtp.rif;
1838                 if (vcc->qos.txtp.rdf)
1839                    srv_p.rdf = vcc->qos.txtp.rdf;
1840                 if (vcc->qos.txtp.nrm_pres)
1841                    srv_p.nrm = vcc->qos.txtp.nrm;
1842                 if (vcc->qos.txtp.trm_pres)
1843                    srv_p.trm = vcc->qos.txtp.trm;
1844                 if (vcc->qos.txtp.adtf_pres)
1845                    srv_p.adtf = vcc->qos.txtp.adtf;
1846                 if (vcc->qos.txtp.cdf_pres)
1847                    srv_p.cdf = vcc->qos.txtp.cdf;    
1848                 if (srv_p.icr > srv_p.pcr)
1849                    srv_p.icr = srv_p.pcr;    
1850                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1851                                                       srv_p.pcr, srv_p.mcr);)
1852                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1853         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1854                 if (iadev->phy_type & FE_25MBIT_PHY) {
1855                     printk("IA:  CBR not supported\n");
1856                     return -EINVAL; 
1857                 }
1858                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1859                    IF_CBR(printk("PCR is not available\n");)
1860                    return -1;
1861                 }
1862                 vc->type = CBR;
1863                 vc->status = CRC_APPEND;
1864                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1865                     return ret;
1866                 }
1867        } 
1868         else  
1869            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n"); 
1870         
1871         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1872         IF_EVENT(printk("ia open_tx returning \n");)  
1873         return 0;  
1874 }  
1875   
1876   
1877 static int tx_init(struct atm_dev *dev)  
1878 {  
1879         IADEV *iadev;  
1880         struct tx_buf_desc *buf_desc_ptr;
1881         unsigned int tx_pkt_start;  
1882         void *dle_addr;  
1883         int i;  
1884         u_short tcq_st_adr;  
1885         u_short *tcq_start;  
1886         u_short prq_st_adr;  
1887         u_short *prq_start;  
1888         struct main_vc *vc;  
1889         struct ext_vc *evc;   
1890         u_short tmp16;
1891         u32 vcsize_sel;
1892  
1893         iadev = INPH_IA_DEV(dev);  
1894         spin_lock_init(&iadev->tx_lock);
1895  
1896         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1897                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1898
1899         /* Allocate 4k (boundary aligned) bytes */
1900         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1901                                         &iadev->tx_dle_dma);  
1902         if (!dle_addr)  {
1903                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1904                 goto err_out;
1905         }
1906         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1907         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1908         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1909         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1910
1911         /* write the upper 20 bits of the start address to tx list address register */  
1912         writel(iadev->tx_dle_dma & 0xfffff000,
1913                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1914         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1915         writew(0, iadev->seg_reg+MODE_REG_0);  
1916         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1917         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1918         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1919         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1920   
1921         /*  
1922            Transmit side control memory map  
1923            --------------------------------    
1924          Buffer descr   0x0000 (128 - 4K)  
1925          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1926                                         (512 - 1K) each  
1927                                         TCQ - 4K, PRQ - 5K  
1928          CBR Table      0x1800 (as needed) - 6K  
1929          UBR Table      0x3000 (1K - 4K) - 12K  
1930          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1931          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1932                                 ABR Tbl - 20K, ABR Wq - 22K   
1933          extended VC    0x6000 (1K - 8K) - 24K  
1934          VC Table       0x8000 (1K - 32K) - 32K  
1935           
1936         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1937         and Wait q, which can be allotted later.  
1938         */  
1939      
1940         /* Buffer Descriptor Table Base address */  
1941         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1942   
1943         /* initialize each entry in the buffer descriptor table */  
1944         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1945         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1946         buf_desc_ptr++;  
1947         tx_pkt_start = TX_PACKET_RAM;  
1948         for(i=1; i<=iadev->num_tx_desc; i++)  
1949         {  
1950                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1951                 buf_desc_ptr->desc_mode = AAL5;  
1952                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1953                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1954                 buf_desc_ptr++;           
1955                 tx_pkt_start += iadev->tx_buf_sz;  
1956         }  
1957         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1958         if (!iadev->tx_buf) {
1959             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1960             goto err_free_dle;
1961         }
1962         for (i= 0; i< iadev->num_tx_desc; i++)
1963         {
1964             struct cpcs_trailer *cpcs;
1965  
1966             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1967             if(!cpcs) {                
1968                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1969                 goto err_free_tx_bufs;
1970             }
1971             iadev->tx_buf[i].cpcs = cpcs;
1972             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1973                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1974         }
1975         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1976                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1977         if (!iadev->desc_tbl) {
1978                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1979                 goto err_free_all_tx_bufs;
1980         }
1981   
1982         /* Communication Queues base address */  
1983         i = TX_COMP_Q * iadev->memSize;
1984         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1985   
1986         /* Transmit Complete Queue */  
1987         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1988         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1989         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1990         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1991         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1992                                               iadev->seg_reg+TCQ_ED_ADR); 
1993         /* Fill the TCQ with all the free descriptors. */  
1994         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1995         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1996         for(i=1; i<=iadev->num_tx_desc; i++)  
1997         {  
1998                 *tcq_start = (u_short)i;  
1999                 tcq_start++;  
2000         }  
2001   
2002         /* Packet Ready Queue */  
2003         i = PKT_RDY_Q * iadev->memSize; 
2004         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2005         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2006                                               iadev->seg_reg+PRQ_ED_ADR);
2007         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2008         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2009          
2010         /* Load local copy of PRQ and TCQ ptrs */
2011         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2012         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2013         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2014
2015         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2016         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2017         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2018
2019         /* Just for safety initializing the queue to have desc 1 always */  
2020         /* Fill the PRQ with all the free descriptors. */  
2021         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2022         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2023         for(i=1; i<=iadev->num_tx_desc; i++)  
2024         {  
2025                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2026                 prq_start++;  
2027         }  
2028         /* CBR Table */  
2029         IF_INIT(printk("Start CBR Init\n");)
2030 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2031         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2032 #else /* Charlie's logic is wrong ? */
2033         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2034         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2035         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2036 #endif
2037
2038         IF_INIT(printk("value in register = 0x%x\n",
2039                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2040         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2041         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2042         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2043                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2044         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2045         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2046         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
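        /* The CBR schedule table holds num_vc*3 16-bit slots (num_vc*6 bytes);
           CBR_TAB_END is the word address of its last slot: the -2 backs up one
           u16 and the >> 1 converts the byte offset to a word offset. */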
2047         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2048                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2049         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2050           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2051           readw(iadev->seg_reg+CBR_TAB_END+1));)
2052
2053         /* Initialize the CBR Scheduling Table */
2054         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2055                                                           0, iadev->num_vc*6); 
2056         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2057         iadev->CbrEntryPt = 0;
2058         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2059         iadev->NumEnabledCBR = 0;
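        /* With 3 schedule slots per VC, Granularity = MAX_ATM_155 / (num_vc*3)
           is the cell rate that one CBR slot appears to represent, e.g. for
           1K VCs the line rate is divided across 3072 slots. */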
2060
2061         /* UBR scheduling Table and wait queue */  
2062         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2063                 - SCHEDSZ is 1K (# of entries).  
2064                 - UBR Table size is 4K  
2065                 - UBR wait queue is 4K  
2066            since the table and wait queues are contiguous, all the bytes   
2067            can be initialized by one memset.  
2068         */  
2069         
2070         vcsize_sel = 0;
2071         i = 8*1024;
2072         while (i != iadev->num_vc) {
2073           i /= 2;
2074           vcsize_sel++;
2075         }
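        /* Same log2(8K / num_vc) encoding as in rx_init(); vcsize_sel is OR'd
           into the low bits of the VC table base register below. */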
2076  
2077         i = MAIN_VC_TABLE * iadev->memSize;
2078         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2079         i =  EXT_VC_TABLE * iadev->memSize;
2080         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2081         i = UBR_SCHED_TABLE * iadev->memSize;
2082         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2083         i = UBR_WAIT_Q * iadev->memSize; 
2084         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2085         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2086                                                        0, iadev->num_vc*8);
2087         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2088         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2089                 - SCHEDSZ is 1K (# of entries).  
2090                 - ABR Table size is 2K  
2091                 - ABR wait queue is 2K  
2092            since the table and wait queues are contiguous, all the bytes   
2093            can be initialized by one memset.  
2094         */  
2095         i = ABR_SCHED_TABLE * iadev->memSize;
2096         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2097         i = ABR_WAIT_Q * iadev->memSize;
2098         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2099  
2100         i = ABR_SCHED_TABLE*iadev->memSize;
2101         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2102         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2103         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2104         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2105         if (!iadev->testTable) {
2106            printk("Get free page failed\n");
2107            goto err_free_desc_tbl;
2108         }
2109         for(i=0; i<iadev->num_vc; i++)  
2110         {  
2111                 memset((caddr_t)vc, 0, sizeof(*vc));  
2112                 memset((caddr_t)evc, 0, sizeof(*evc));  
2113                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2114                                                 GFP_KERNEL);
2115                 if (!iadev->testTable[i])
2116                         goto err_free_test_tables;
2117                 iadev->testTable[i]->lastTime = 0;
2118                 iadev->testTable[i]->fract = 0;
2119                 iadev->testTable[i]->vc_status = VC_UBR;
2120                 vc++;  
2121                 evc++;  
2122         }  
2123   
2124         /* Other Initialization */  
2125           
2126         /* Max Rate Register */  
2127         if (iadev->phy_type & FE_25MBIT_PHY) {
2128            writew(RATE25, iadev->seg_reg+MAXRATE);  
2129            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2130         }
2131         else {
2132            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2133            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2134         }
2135         /* Set Idle Header Registers to be sure */  
2136         writew(0, iadev->seg_reg+IDLEHEADHI);  
2137         writew(0, iadev->seg_reg+IDLEHEADLO);  
2138   
2139         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2140         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2141
2142         iadev->close_pending = 0;
2143         init_waitqueue_head(&iadev->close_wait);
2144         init_waitqueue_head(&iadev->timeout_wait);
2145         skb_queue_head_init(&iadev->tx_dma_q);  
2146         ia_init_rtn_q(&iadev->tx_return_q);  
2147
2148         /* RM Cell Protocol ID and Message Type */  
2149         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2150         skb_queue_head_init (&iadev->tx_backlog);
2151   
2152         /* Mode Register 1 */  
2153         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2154   
2155         /* Mode Register 0 */  
2156         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2157   
2158         /* Interrupt Status Register - read to clear */  
2159         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2160   
2161         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2162         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2163         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2164         iadev->tx_pkt_cnt = 0;
2165         iadev->rate_limit = iadev->LineRate / 3;
2166   
2167         return 0;
2168
2169 err_free_test_tables:
2170         while (--i >= 0)
2171                 kfree(iadev->testTable[i]);
2172         kfree(iadev->testTable);
2173 err_free_desc_tbl:
2174         kfree(iadev->desc_tbl);
2175 err_free_all_tx_bufs:
2176         i = iadev->num_tx_desc;
2177 err_free_tx_bufs:
2178         while (--i >= 0) {
2179                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2180
2181                 pci_unmap_single(iadev->pci, desc->dma_addr,
2182                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2183                 kfree(desc->cpcs);
2184         }
2185         kfree(iadev->tx_buf);
2186 err_free_dle:
2187         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2188                             iadev->tx_dle_dma);  
2189 err_out:
2190         return -ENOMEM;
2191 }   
2192    
2193 static irqreturn_t ia_int(int irq, void *dev_id)  
2194 {  
2195    struct atm_dev *dev;  
2196    IADEV *iadev;  
2197    unsigned int status;  
2198    int handled = 0;
2199
2200    dev = dev_id;  
2201    iadev = INPH_IA_DEV(dev);  
2202    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2203    { 
2204         handled = 1;
2205         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2206         if (status & STAT_REASSINT)  
2207         {  
2208            /* do something */  
2209            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2210            rx_intr(dev);  
2211         }  
2212         if (status & STAT_DLERINT)  
2213         {  
2214            /* Clear this bit by writing a 1 to it. */  
2215            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2216            rx_dle_intr(dev);  
2217         }  
2218         if (status & STAT_SEGINT)  
2219         {  
2220            /* do something */ 
2221            IF_EVENT(printk("IA: tx_intr \n");) 
2222            tx_intr(dev);  
2223         }  
2224         if (status & STAT_DLETINT)  
2225         {  
2226            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2227            tx_dle_intr(dev);  
2228         }  
2229         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2230         {  
2231            if (status & STAT_FEINT) 
2232                IaFrontEndIntr(iadev);
2233         }  
2234    }
2235    return IRQ_RETVAL(handled);
2236 }  
2237           
2238           
2239           
2240 /*----------------------------- entries --------------------------------*/  
2241 static int get_esi(struct atm_dev *dev)  
2242 {  
2243         IADEV *iadev;  
2244         int i;  
2245         u32 mac1;  
2246         u16 mac2;  
2247           
2248         iadev = INPH_IA_DEV(dev);  
2249         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2250                                 iadev->reg+IPHASE5575_MAC1)));  
2251         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2252         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
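        /* The loops below build the ESI from the two MAC registers, most
           significant byte first: MAC1 supplies the first MAC1_LEN bytes and
           MAC2 the remaining MAC2_LEN bytes.  For example, with MAC1_LEN = 4,
           mac1 = 0x00204811 and mac2 = 0x2233 the ESI becomes 00-20-48-11-22-33. */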
2253         for (i=0; i<MAC1_LEN; i++)  
2254                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2255           
2256         for (i=0; i<MAC2_LEN; i++)  
2257                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2258         return 0;  
2259 }  
2260           
2261 static int reset_sar(struct atm_dev *dev)  
2262 {  
2263         IADEV *iadev;  
2264         int i, error = 1;  
2265         unsigned int pci[64];  
2266           
2267         iadev = INPH_IA_DEV(dev);  
2268         for(i=0; i<64; i++)  
2269           if ((error = pci_read_config_dword(iadev->pci,  
2270                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2271               return error;  
2272         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2273         for(i=0; i<64; i++)  
2274           if ((error = pci_write_config_dword(iadev->pci,  
2275                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2276             return error;  
2277         udelay(5);  
2278         return 0;  
2279 }  
2280           
2281           
2282 static int __devinit ia_init(struct atm_dev *dev)
2283 {  
2284         IADEV *iadev;  
2285         unsigned long real_base;
2286         void __iomem *base;
2287         unsigned short command;  
2288         int error, i; 
2289           
2290         /* The device has been identified and registered. Now we read   
2291            necessary configuration info like memory base address,   
2292            interrupt number etc */  
2293           
2294         IF_INIT(printk(">ia_init\n");)  
2295         dev->ci_range.vpi_bits = 0;  
2296         dev->ci_range.vci_bits = NR_VCI_LD;  
2297
2298         iadev = INPH_IA_DEV(dev);  
2299         real_base = pci_resource_start (iadev->pci, 0);
2300         iadev->irq = iadev->pci->irq;
2301                   
2302         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2303         if (error) {
2304                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2305                                 dev->number,error);  
2306                 return -EINVAL;  
2307         }  
2308         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2309                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2310           
2311         /* find mapping size of board */  
2312           
2313         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2314
2315         if (iadev->pci_map_size == 0x100000){
2316           iadev->num_vc = 4096;
2317           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2318           iadev->memSize = 4;
2319         }
2320         else if (iadev->pci_map_size == 0x40000) {
2321           iadev->num_vc = 1024;
2322           iadev->memSize = 1;
2323         }
2324         else {
2325            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2326            return -EINVAL;
2327         }
2328         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2329           
2330         /* enable bus mastering */
2331         pci_set_master(iadev->pci);
2332
2333         /*  
2334          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2335          */  
2336         udelay(10);  
2337           
2338         /* mapping the physical address to a virtual address in address space */  
2339         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2340           
2341         if (!base)  
2342         {  
2343                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2344                             dev->number);  
2345                 return error;  
2346         }  
2347         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2348                         dev->number, iadev->pci->revision, base, iadev->irq);)
2349           
2350         /* filling the iphase dev structure */  
2351         iadev->mem = iadev->pci_map_size /2;  
2352         iadev->real_base = real_base;  
2353         iadev->base = base;  
2354                   
2355         /* Bus Interface Control Registers */  
2356         iadev->reg = base + REG_BASE;
2357         /* Segmentation Control Registers */  
2358         iadev->seg_reg = base + SEG_BASE;
2359         /* Reassembly Control Registers */  
2360         iadev->reass_reg = base + REASS_BASE;  
2361         /* Front end/ DMA control registers */  
2362         iadev->phy = base + PHY_BASE;  
2363         iadev->dma = base + PHY_BASE;  
2364         /* RAM - Segmentation RAM and Reassembly RAM */  
2365         iadev->ram = base + ACTUAL_RAM_BASE;  
2366         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2367         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2368   
2369         /* let's print out the above */  
2370         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2371           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2372           iadev->phy, iadev->ram, iadev->seg_ram, 
2373           iadev->reass_ram);) 
2374           
2375         /* let's try reading the MAC address */  
2376         error = get_esi(dev);  
2377         if (error) {
2378           iounmap(iadev->base);
2379           return error;  
2380         }
2381         printk("IA: ");
2382         for (i=0; i < ESI_LEN; i++)  
2383                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2384         printk("\n");  
2385   
2386         /* reset SAR */  
2387         if (reset_sar(dev)) {
2388            iounmap(iadev->base);
2389            printk("IA: reset SAR failed, please try again\n");
2390            return 1;
2391         }
2392         return 0;  
2393 }  
2394
2395 static void ia_update_stats(IADEV *iadev) {
2396     if (!iadev->carrier_detect)
2397         return;
2398     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2399     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2400     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2401     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2402     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2403     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2404     return;
2405 }
2406   
2407 static void ia_led_timer(unsigned long arg) {
2408         unsigned long flags;
2409         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2410         u_char i;
2411         static u32 ctrl_reg; 
2412         for (i = 0; i < iadev_count; i++) {
2413            if (ia_dev[i]) {
2414               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2415               if (blinking[i] == 0) {
2416                  blinking[i]++;
2417                  ctrl_reg &= (~CTRL_LED);
2418                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2419                  ia_update_stats(ia_dev[i]);
2420               }
2421               else {
2422                  blinking[i] = 0;
2423                  ctrl_reg |= CTRL_LED;
2424                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2425                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2426                  if (ia_dev[i]->close_pending)  
2427                     wake_up(&ia_dev[i]->close_wait);
2428                  ia_tx_poll(ia_dev[i]);
2429                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2430               }
2431            }
2432         }
2433         mod_timer(&ia_timer, jiffies + HZ / 4);
2434         return;
2435 }
2436
2437 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2438         unsigned long addr)  
2439 {  
2440         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2441 }  
2442   
2443 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2444 {  
2445         return readl(INPH_IA_DEV(dev)->phy+addr);  
2446 }  
2447
2448 static void ia_free_tx(IADEV *iadev)
2449 {
2450         int i;
2451
2452         kfree(iadev->desc_tbl);
2453         for (i = 0; i < iadev->num_vc; i++)
2454                 kfree(iadev->testTable[i]);
2455         kfree(iadev->testTable);
2456         for (i = 0; i < iadev->num_tx_desc; i++) {
2457                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2458
2459                 pci_unmap_single(iadev->pci, desc->dma_addr,
2460                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2461                 kfree(desc->cpcs);
2462         }
2463         kfree(iadev->tx_buf);
2464         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2465                             iadev->tx_dle_dma);  
2466 }
2467
2468 static void ia_free_rx(IADEV *iadev)
2469 {
2470         kfree(iadev->rx_open);
2471         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2472                           iadev->rx_dle_dma);  
2473 }
2474
2475 static int __devinit ia_start(struct atm_dev *dev)
2476 {  
2477         IADEV *iadev;  
2478         int error;  
2479         unsigned char phy;  
2480         u32 ctrl_reg;  
2481         IF_EVENT(printk(">ia_start\n");)  
2482         iadev = INPH_IA_DEV(dev);  
2483         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2484                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2485                     dev->number, iadev->irq);  
2486                 error = -EAGAIN;
2487                 goto err_out;
2488         }  
2489         /* @@@ should release IRQ on error */  
2490         /* enabling memory + master */  
2491         if ((error = pci_write_config_word(iadev->pci,   
2492                                 PCI_COMMAND,   
2493                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2494         {  
2495                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2496                     "master (0x%x)\n",dev->number, error);  
2497                 error = -EIO;  
2498                 goto err_free_irq;
2499         }  
2500         udelay(10);  
2501   
2502         /* Maybe we should reset the front end, initialize Bus Interface Control   
2503                 Registers and see. */  
2504   
2505         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2506                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2507         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2508         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2509                         | CTRL_B8  
2510                         | CTRL_B16  
2511                         | CTRL_B32  
2512                         | CTRL_B48  
2513                         | CTRL_B64  
2514                         | CTRL_B128  
2515                         | CTRL_ERRMASK  
2516                         | CTRL_DLETMASK         /* should be removed later */  
2517                         | CTRL_DLERMASK  
2518                         | CTRL_SEGMASK  
2519                         | CTRL_REASSMASK          
2520                         | CTRL_FEMASK  
2521                         | CTRL_CSPREEMPT;  
2522   
2523        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2524   
2525         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2526                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2527            printk("Bus status reg after init: %08x\n", 
2528                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2529     
2530         ia_hw_type(iadev); 
2531         error = tx_init(dev);  
2532         if (error)
2533                 goto err_free_irq;
2534         error = rx_init(dev);  
2535         if (error)
2536                 goto err_free_tx;
2537   
2538         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2539         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2540         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2541                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2542         phy = 0; /* resolve compiler complaint */
2543         IF_INIT ( 
2544         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2545                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2546         else  
2547                 printk("IA: utopia,rev.%0x\n",phy);) 
2548
2549         if (iadev->phy_type &  FE_25MBIT_PHY)
2550            ia_mb25_init(iadev);
2551         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2552            ia_suni_pm7345_init(iadev);
2553         else {
2554                 error = suni_init(dev);
2555                 if (error)
2556                         goto err_free_rx;
2557                 if (dev->phy->start) {
2558                         error = dev->phy->start(dev);
2559                         if (error)
2560                                 goto err_free_rx;
2561                 }
2562                 /* Get iadev->carrier_detect status */
2563                 IaFrontEndIntr(iadev);
2564         }
2565         return 0;
2566
2567 err_free_rx:
2568         ia_free_rx(iadev);
2569 err_free_tx:
2570         ia_free_tx(iadev);
2571 err_free_irq:
2572         free_irq(iadev->irq, dev);  
2573 err_out:
2574         return error;
2575 }  
2576   
2577 static void ia_close(struct atm_vcc *vcc)  
2578 {
2579         DEFINE_WAIT(wait);
2580         u16 *vc_table;
2581         IADEV *iadev;
2582         struct ia_vcc *ia_vcc;
2583         struct sk_buff *skb = NULL;
2584         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2585         unsigned long closetime, flags;
2586
2587         iadev = INPH_IA_DEV(vcc->dev);
2588         ia_vcc = INPH_IA_VCC(vcc);
2589         if (!ia_vcc) return;  
2590
2591         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2592                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2593         clear_bit(ATM_VF_READY,&vcc->flags);
2594         skb_queue_head_init (&tmp_tx_backlog);
2595         skb_queue_head_init (&tmp_vcc_backlog); 
2596         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2597            iadev->close_pending++;
2598            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2599            schedule_timeout(50);
2600            finish_wait(&iadev->timeout_wait, &wait);
2601            spin_lock_irqsave(&iadev->tx_lock, flags); 
2602            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2603               if (ATM_SKB(skb)->vcc == vcc){ 
2604                  if (vcc->pop) vcc->pop(vcc, skb);
2605                  else dev_kfree_skb_any(skb);
2606               }
2607               else 
2608                  skb_queue_tail(&tmp_tx_backlog, skb);
2609            } 
2610            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2611              skb_queue_tail(&iadev->tx_backlog, skb);
2612            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2613            closetime = 300000 / ia_vcc->pcr;
2614            if (closetime == 0)
2615               closetime = 1;
2616            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2617            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2618            spin_lock_irqsave(&iadev->tx_lock, flags);
2619            iadev->close_pending--;
2620            iadev->testTable[vcc->vci]->lastTime = 0;
2621            iadev->testTable[vcc->vci]->fract = 0; 
2622            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2623            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2624               if (vcc->qos.txtp.min_pcr > 0)
2625                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2626            }
2627            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2628               ia_vcc = INPH_IA_VCC(vcc); 
2629               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2630               ia_cbrVc_close (vcc);
2631            }
2632            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2633         }
2634         
2635         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2636            // reset reass table
2637            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2638            vc_table += vcc->vci; 
2639            *vc_table = NO_AAL5_PKT;
2640            // reset vc table
2641            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2642            vc_table += vcc->vci;
2643            *vc_table = (vcc->vci << 6) | 15;
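           // this restores the "invalid VCI" encoding that rx_init() uses for
           // unopened entries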
2644            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2645               struct abr_vc_table __iomem *abr_vc_table = 
2646                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2647               abr_vc_table +=  vcc->vci;
2648               abr_vc_table->rdf = 0x0003;
2649               abr_vc_table->air = 0x5eb1;
2650            }                                 
2651            // Drain the packets
2652            rx_dle_intr(vcc->dev); 
2653            iadev->rx_open[vcc->vci] = NULL;
2654         }
2655         kfree(INPH_IA_VCC(vcc));  
2656         ia_vcc = NULL;
2657         vcc->dev_data = NULL;
2658         clear_bit(ATM_VF_ADDR,&vcc->flags);
2659         return;        
2660 }  
2661   
2662 static int ia_open(struct atm_vcc *vcc)
2663 {  
2664         IADEV *iadev;  
2665         struct ia_vcc *ia_vcc;  
2666         int error;  
2667         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2668         {  
2669                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2670                 vcc->dev_data = NULL;
2671         }  
2672         iadev = INPH_IA_DEV(vcc->dev);  
2673         if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)  
2674         {  
2675                 IF_EVENT(printk("iphase open: unspec part\n");)  
2676                 set_bit(ATM_VF_ADDR,&vcc->flags);
2677         }  
2678         if (vcc->qos.aal != ATM_AAL5)  
2679                 return -EINVAL;  
2680         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2681                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2682   
2683         /* Device dependent initialization */  
2684         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2685         if (!ia_vcc) return -ENOMEM;  
2686         vcc->dev_data = ia_vcc;
2687   
2688         if ((error = open_rx(vcc)))  
2689         {  
2690                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2691                 ia_close(vcc);  
2692                 return error;  
2693         }  
2694   
2695         if ((error = open_tx(vcc)))  
2696         {  
2697                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2698                 ia_close(vcc);  
2699                 return error;  
2700         }  
2701   
2702         set_bit(ATM_VF_READY,&vcc->flags);
2703
2704 #if 0
2705         {
2706            static u8 first = 1; 
2707            if (first) {
2708               ia_timer.expires = jiffies + 3*HZ;
2709               add_timer(&ia_timer);
2710               first = 0;
2711            }           
2712         }
2713 #endif
2714         IF_EVENT(printk("ia open returning\n");)  
2715         return 0;  
2716 }  
2717   
2718 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2719 {  
2720         IF_EVENT(printk(">ia_change_qos\n");)  
2721         return 0;  
2722 }  
2723   
2724 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2725 {  
2726    IA_CMDBUF ia_cmds;
2727    IADEV *iadev;
2728    int i, board;
2729    u16 __user *tmps;
2730    IF_EVENT(printk(">ia_ioctl\n");)  
2731    if (cmd != IA_CMD) {
2732       if (!dev->phy->ioctl) return -EINVAL;
2733       return dev->phy->ioctl(dev,cmd,arg);
2734    }
2735    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
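        /* On input, ia_cmds.status carries the 0-based board index; anything
           out of range falls back to board 0 below. */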
2736    board = ia_cmds.status;
2737    if ((board < 0) || (board >= iadev_count))
2738          board = 0;    
2739    iadev = ia_dev[board];
2740    switch (ia_cmds.cmd) {
2741    case MEMDUMP:
2742    {
2743         switch (ia_cmds.sub_cmd) {
2744           case MEMDUMP_DEV:     
2745              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2746              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2747                 return -EFAULT;
2748              ia_cmds.status = 0;
2749              break;
2750           case MEMDUMP_SEGREG:
2751              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2752              tmps = (u16 __user *)ia_cmds.buf;
2753              for(i=0; i<0x80; i+=2, tmps++)
2754                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2755              ia_cmds.status = 0;
2756              ia_cmds.len = 0x80;
2757              break;
2758           case MEMDUMP_REASSREG:
2759              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2760              tmps = (u16 __user *)ia_cmds.buf;
2761              for(i=0; i<0x80; i+=2, tmps++)
2762                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2763              ia_cmds.status = 0;
2764              ia_cmds.len = 0x80;
2765              break;
2766           case MEMDUMP_FFL:
2767           {  
2768              ia_regs_t       *regs_local;
2769              ffredn_t        *ffL;
2770              rfredn_t        *rfL;
2771                      
2772              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2773              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2774              if (!regs_local) return -ENOMEM;
2775              ffL = &regs_local->ffredn;
2776              rfL = &regs_local->rfredn;
2777              /* Copy real rfred registers into the local copy */
2778              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2779                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2780              /* Copy real ffred registers into the local copy */
2781              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2782                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2783
2784              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2785                 kfree(regs_local);
2786                 return -EFAULT;
2787              }
2788              kfree(regs_local);
2789              printk("Board %d registers dumped\n", board);
2790              ia_cmds.status = 0;                  
2791          }      
2792              break;        
2793          case READ_REG:
2794          {  
2795              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2796              desc_dbg(iadev); 
2797              ia_cmds.status = 0; 
2798          }
2799              break;
2800          case 0x6:
2801          {  
2802              ia_cmds.status = 0; 
2803              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2804              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2805          }
2806              break;
2807          case 0x8:
2808          {
2809              struct k_sonet_stats *stats;
2810              stats = &PRIV(_ia_dev[board])->sonet_stats;
2811              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2812              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2813              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2814              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2815              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2816              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2817              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2818              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2819              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2820          }
2821             ia_cmds.status = 0;
2822             break;
2823          case 0x9:
2824             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2825             for (i = 1; i <= iadev->num_rx_desc; i++)
2826                free_desc(_ia_dev[board], i);
2827             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2828                                             iadev->reass_reg+REASS_MASK_REG);
2829             iadev->rxing = 1;
2830             
2831             ia_cmds.status = 0;
2832             break;
2833
2834          case 0xb:
2835             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2836             IaFrontEndIntr(iadev);
2837             break;
2838          case 0xa:
2839          {  
2840             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2841              ia_cmds.status = 0; 
2842              IADebugFlag = ia_cmds.maddr;
2843              printk("New debug option loaded\n");
2844          }
2845              break;
2846          default:
2847              ia_cmds.status = 0;
2848              break;
2849       } 
2850    }
2851       break;
2852    default:
2853       break;
2854
2855    }    
2856    return 0;  
2857 }  
2858   
2859 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2860         void __user *optval, int optlen)  
2861 {  
2862         IF_EVENT(printk(">ia_getsockopt\n");)  
2863         return -EINVAL;  
2864 }  
2865   
2866 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2867         void __user *optval, int optlen)  
2868 {  
2869         IF_EVENT(printk(">ia_setsockopt\n");)  
2870         return -EINVAL;  
2871 }  
2872   
2873 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2874         IADEV *iadev;
2875         struct dle *wr_ptr;
2876         struct tx_buf_desc __iomem *buf_desc_ptr;
2877         int desc;
2878         int comp_code;
2879         int total_len;
2880         struct cpcs_trailer *trailer;
2881         struct ia_vcc *iavcc;
2882
2883         iadev = INPH_IA_DEV(vcc->dev);  
2884         iavcc = INPH_IA_VCC(vcc);
2885         if (!iavcc->txing) {
2886            printk("discard packet on closed VC\n");
2887            if (vcc->pop)
2888                 vcc->pop(vcc, skb);
2889            else
2890                 dev_kfree_skb_any(skb);
2891            return 0;
2892         }
2893
2894         if (skb->len > iadev->tx_buf_sz - 8) {
2895            printk("Transmit size exceeds tx buffer size\n");
2896            if (vcc->pop)
2897                  vcc->pop(vcc, skb);
2898            else
2899                  dev_kfree_skb_any(skb);
2900           return 0;
2901         }
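             /* The SAR DMAs straight out of skb->data, so the buffer must be
                word aligned; presumably the DMA engine cannot handle unaligned
                host addresses, hence misaligned skbs are simply dropped below. */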
2902         if ((unsigned long)skb->data & 3) {
2903            printk("Misaligned SKB\n");
2904            if (vcc->pop)
2905                  vcc->pop(vcc, skb);
2906            else
2907                  dev_kfree_skb_any(skb);
2908            return 0;
2909         }       
2910         /* Get a descriptor number from our free descriptor queue.
2911            The descriptor number is taken from the TCQ, which is used here
2912            as a free-buffer queue: initially the TCQ is loaded with all the
2913            descriptors and is therefore full.
2914         */
2915         desc = get_desc (iadev, iavcc);
2916         if (desc == 0xffff) 
2917             return 1;
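             /* Layout of a TCQ entry as decoded below (inferred from the
                shift/mask, not taken from the hardware manual):
                  bits 15..13 - completion code (comp_code)
                  bits 12..0  - descriptor number (desc)
                e.g. a raw entry of 0x2005 yields comp_code 1 and desc 5. */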
2918         comp_code = desc >> 13;  
2919         desc &= 0x1fff;  
2920   
2921         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2922         {  
2923                 IF_ERR(printk(DEV_LABEL ": invalid desc for send: %d\n", desc);) 
2924                 atomic_inc(&vcc->stats->tx);
2925                 if (vcc->pop)   
2926                     vcc->pop(vcc, skb);   
2927                 else  
2928                     dev_kfree_skb_any(skb);
2929                 return 0;   /* return SUCCESS */
2930         }  
2931   
2932         if (comp_code)  
2933         {  
2934             IF_ERR(printk(DEV_LABEL ": send desc:%d completion code %d error\n", 
2935                                                             desc, comp_code);)  
2936         }  
2937        
2938         /* remember the desc and vcc mapping */
2939         iavcc->vc_desc_cnt++;
2940         iadev->desc_tbl[desc-1].iavcc = iavcc;
2941         iadev->desc_tbl[desc-1].txskb = skb;
2942         IA_SKB_STATE(skb) = 0;
2943
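             /* Consume one 16-bit TCQ entry: advance the read pointer by 2,
                wrap from tcq_ed back to tcq_st, then mirror the new value to
                the SEG TCQ_RD_PTR register, presumably so the SAR knows how far
                the free-descriptor queue has been drained.
                e.g. with tcq_st=0x100, tcq_ed=0x1fe and tcq_rd=0x1fe, the
                increment wraps the pointer back to 0x100. */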
2944         iadev->ffL.tcq_rd += 2;
2945         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2946                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2947         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2948   
2949         /* Put the descriptor number in the packet ready queue  
2950                 and put the updated write pointer in the DLE field   
2951         */   
2952         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2953
2954         iadev->ffL.prq_wr += 2;
2955         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2956                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2957           
2958         /* Figure out the exact length of the packet and the padding required
2959            to align it on a 48-byte boundary.  */
2960         total_len = skb->len + sizeof(struct cpcs_trailer);  
2961         total_len = ((total_len + 47) / 48) * 48;
2962         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
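             /* Worked example (assuming sizeof(struct cpcs_trailer) == 8, as the
                earlier tx_buf_sz - 8 check suggests): skb->len = 100 gives
                total_len = 100 + 8 = 108, rounded up to 144, i.e. 44 bytes of
                padding reported by the IF_TX message above. */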
2963  
2964         /* Put the packet in a tx buffer */   
2965         trailer = iadev->tx_buf[desc-1].cpcs;
2966         IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2967                   (u32)skb, (u32)skb->data, skb->len, desc);)
2968         trailer->control = 0; 
2969         /*big endian*/ 
2970         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
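             /* The AAL5 length field is stored big-endian: the expression above
                byte-swaps the low 16 bits of skb->len, e.g. a length of 0x01f4
                (500) is written as 0xf401.  Only the low 16 bits matter, which
                matches the 16-bit length field of the AAL5 trailer. */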
2971         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2972
2973         /* Display the packet */  
2974         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2975                                                         skb->len, tcnter++);  
2976         xdump(skb->data, skb->len, "TX: ");
2977         printk("\n");)
2978
2979         /* Build the buffer descriptor */  
2980         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2981         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2982         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2983         /* Huh ? p.115 of users guide describes this as a read-only register */
2984         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2985         buf_desc_ptr->vc_index = vcc->vci;
2986         buf_desc_ptr->bytes = total_len;  
2987
2988         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
2989            clear_lockup (vcc, iadev);
2990
2991         /* Build the DLE structure */  
2992         wr_ptr = iadev->tx_dle_q.write;  
2993         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
2994         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
2995                 skb->len, PCI_DMA_TODEVICE);
2996         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
2997                                                   buf_desc_ptr->buf_start_lo;  
2998         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
2999         wr_ptr->bytes = skb->len;  
3000
3001         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3002         if ((wr_ptr->bytes >> 2) == 0xb)
3003            wr_ptr->bytes = 0x30;
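             /* (bytes >> 2) == 0xb matches byte counts 0x2c-0x2f, i.e. slightly
                wider than the 0x2d-0x2f range named above; bumping the DLE
                length to 0x30 (48) sidesteps the lockup at the cost of DMAing
                up to 4 extra bytes from the source buffer. */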
3004
3005         wr_ptr->mode = TX_DLE_PSI; 
3006         wr_ptr->prq_wr_ptr_data = 0;
3007   
3008         /* end is not to be used for the DLE q */  
3009         if (++wr_ptr == iadev->tx_dle_q.end)  
3010                 wr_ptr = iadev->tx_dle_q.start;  
3011         
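             /* Each packet uses two DLEs: the first (above) DMAs the payload
                from skb->data to the start of the on-board buffer, and the
                second (below) DMAs the CPCS trailer to the tail of the
                48-byte-aligned buffer.  The second DLE also carries the updated
                PRQ write pointer and the interrupt-enable mode bit, presumably
                so the SAR only sees the packet-ready entry once both transfers
                have completed. */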
3012         /* Build trailer dle */
3013         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3014         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3015           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3016
3017         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3018         wr_ptr->mode = DMA_INT_ENABLE; 
3019         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3020         
3021         /* end is not to be used for the DLE q */
3022         if (++wr_ptr == iadev->tx_dle_q.end)  
3023                 wr_ptr = iadev->tx_dle_q.start;
3024
3025         iadev->tx_dle_q.write = wr_ptr;  
3026         ATM_DESC(skb) = vcc->vci;
3027         skb_queue_tail(&iadev->tx_dma_q, skb);
3028
3029         atomic_inc(&vcc->stats->tx);
3030         iadev->tx_pkt_cnt++;
3031         /* Increment transaction counter */  
3032         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3033         
3034 #if 0        
3035         /* add flow control logic */ 
3036         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3037           if (iavcc->vc_desc_cnt > 10) {
3038              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3039             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3040               iavcc->flow_inc = -1;
3041               iavcc->saved_tx_quota = vcc->tx_quota;
3042            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3043              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3044              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3045               iavcc->flow_inc = 0;
3046            }
3047         }
3048 #endif
3049         IF_TX(printk("ia send done\n");)  
3050         return 0;  
3051 }  
3052
3053 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3054 {
3055         IADEV *iadev; 
3056         struct ia_vcc *iavcc;
3057         unsigned long flags;
3058
3059         iadev = INPH_IA_DEV(vcc->dev);
3060         iavcc = INPH_IA_VCC(vcc); 
3061         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3062         {
3063             if (!skb)
3064                 printk(KERN_CRIT "null skb in ia_send\n");
3065             else dev_kfree_skb_any(skb);
3066             return -EINVAL;
3067         }                         
3068         spin_lock_irqsave(&iadev->tx_lock, flags); 
3069         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3070             dev_kfree_skb_any(skb);
3071             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3072             return -EINVAL; 
3073         }
3074         ATM_SKB(skb)->vcc = vcc;
3075  
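             /* Preserve FIFO ordering: if packets are already backlogged, queue
                behind them; otherwise try to transmit immediately and fall back
                to the backlog when ia_pkt_tx() returns nonzero (no free
                descriptor). */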
3076         if (skb_peek(&iadev->tx_backlog)) {
3077            skb_queue_tail(&iadev->tx_backlog, skb);
3078         }
3079         else {
3080            if (ia_pkt_tx (vcc, skb)) {
3081               skb_queue_tail(&iadev->tx_backlog, skb);
3082            }
3083         }
3084         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3085         return 0;
3086
3087 }
3088
3089 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3090 {
3091   int   left = *pos, n;   
3092   char  *tmpPtr;
3093   IADEV *iadev = INPH_IA_DEV(dev);
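       /* The proc interface calls this once per *pos value: 0 renders the board
          description line, 1 renders the statistics block, and anything else
          returns 0 to signal end of output. */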
3094   if(!left--) {
3095      if (iadev->phy_type == FE_25MBIT_PHY) {
3096        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3097        return n;
3098      }
3099      if (iadev->phy_type == FE_DS3_PHY)
3100         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3101      else if (iadev->phy_type == FE_E3_PHY)
3102         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3103      else if (iadev->phy_type == FE_UTP_OPTION)
3104          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3105      else
3106         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3107      tmpPtr = page + n;
3108      if (iadev->pci_map_size == 0x40000)
3109         n += sprintf(tmpPtr, "-1KVC-");
3110      else
3111         n += sprintf(tmpPtr, "-4KVC-");  
3112      tmpPtr = page + n; 
3113      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3114         n += sprintf(tmpPtr, "1M  \n");
3115      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3116         n += sprintf(tmpPtr, "512K\n");
3117      else
3118        n += sprintf(tmpPtr, "128K\n");
3119      return n;
3120   }
3121   if (!left) {
3122      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3123                            "  Size of Tx Buffer  :  %u\n"
3124                            "  Number of Rx Buffer:  %u\n"
3125                            "  Size of Rx Buffer  :  %u\n"
3126                            "  Packets Received   :  %u\n"
3127                            "  Packets Transmitted:  %u\n"
3128                            "  Cells Received     :  %u\n"
3129                            "  Cells Transmitted  :  %u\n"
3130                            "  Board Dropped Cells:  %u\n"
3131                            "  Board Dropped Pkts :  %u\n",
3132                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3133                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3134                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3135                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3136                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3137   }
3138   return 0;
3139 }
3140   
3141 static const struct atmdev_ops ops = {  
3142         .open           = ia_open,  
3143         .close          = ia_close,  
3144         .ioctl          = ia_ioctl,  
3145         .getsockopt     = ia_getsockopt,  
3146         .setsockopt     = ia_setsockopt,  
3147         .send           = ia_send,  
3148         .phy_put        = ia_phy_put,  
3149         .phy_get        = ia_phy_get,  
3150         .change_qos     = ia_change_qos,  
3151         .proc_read      = ia_proc_read,
3152         .owner          = THIS_MODULE,
3153 };  
3154           
3155 static int __devinit ia_init_one(struct pci_dev *pdev,
3156                                  const struct pci_device_id *ent)
3157 {  
3158         struct atm_dev *dev;  
3159         IADEV *iadev;  
3160         unsigned long flags;
3161         int ret;
3162
3163         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3164         if (!iadev) {
3165                 ret = -ENOMEM;
3166                 goto err_out;
3167         }
3168
3169         iadev->pci = pdev;
3170
3171         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3172                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3173         if (pci_enable_device(pdev)) {
3174                 ret = -ENODEV;
3175                 goto err_out_free_iadev;
3176         }
3177         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3178         if (!dev) {
3179                 ret = -ENOMEM;
3180                 goto err_out_disable_dev;
3181         }
3182         dev->dev_data = iadev;
3183         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3184         IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3185                 iadev->LineRate);)
3186
3187         pci_set_drvdata(pdev, dev);
3188
3189         ia_dev[iadev_count] = iadev;
3190         _ia_dev[iadev_count] = dev;
3191         iadev_count++;
3192         spin_lock_init(&iadev->misc_lock);
3193         /* First fixes first. I don't want to think about this now. */
3194         spin_lock_irqsave(&iadev->misc_lock, flags); 
3195         if (ia_init(dev) || ia_start(dev)) {  
3196                 IF_INIT(printk("IA register failed!\n");)
3197                 iadev_count--;
3198                 ia_dev[iadev_count] = NULL;
3199                 _ia_dev[iadev_count] = NULL;
3200                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3201                 ret = -EINVAL;
3202                 goto err_out_deregister_dev;
3203         }
3204         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3205         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3206
3207         iadev->next_board = ia_boards;  
3208         ia_boards = dev;  
3209
3210         return 0;
3211
3212 err_out_deregister_dev:
3213         atm_dev_deregister(dev);  
3214 err_out_disable_dev:
3215         pci_disable_device(pdev);
3216 err_out_free_iadev:
3217         kfree(iadev);
3218 err_out:
3219         return ret;
3220 }
3221
3222 static void __devexit ia_remove_one(struct pci_dev *pdev)
3223 {
3224         struct atm_dev *dev = pci_get_drvdata(pdev);
3225         IADEV *iadev = INPH_IA_DEV(dev);
3226
3227         /* Disable phy interrupts */
3228         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3229                                    SUNI_RSOP_CIE);
3230         udelay(1);
3231
3232         if (dev->phy && dev->phy->stop)
3233                 dev->phy->stop(dev);
3234
3235         /* De-register device */  
3236         free_irq(iadev->irq, dev);
3237         iadev_count--;
3238         ia_dev[iadev_count] = NULL;
3239         _ia_dev[iadev_count] = NULL;
3240         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3241         atm_dev_deregister(dev);
3242
3243         iounmap(iadev->base);  
3244         pci_disable_device(pdev);
3245
3246         ia_free_rx(iadev);
3247         ia_free_tx(iadev);
3248
3249         kfree(iadev);
3250 }
3251
3252 static struct pci_device_id ia_pci_tbl[] = {
3253         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3254         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3255         { 0,}
3256 };
3257 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3258
3259 static struct pci_driver ia_driver = {
3260         .name =         DEV_LABEL,
3261         .id_table =     ia_pci_tbl,
3262         .probe =        ia_init_one,
3263         .remove =       __devexit_p(ia_remove_one),
3264 };
3265
3266 static int __init ia_module_init(void)
3267 {
3268         int ret;
3269
3270         ret = pci_register_driver(&ia_driver);
3271         if (ret >= 0) {
3272                 ia_timer.expires = jiffies + 3*HZ;
3273                 add_timer(&ia_timer); 
3274         } else
3275                 printk(KERN_ERR DEV_LABEL ": PCI driver registration failed\n");  
3276         return ret;
3277 }
3278
3279 static void __exit ia_module_exit(void)
3280 {
3281         pci_unregister_driver(&ia_driver);
3282
3283         del_timer(&ia_timer);
3284 }
3285
3286 module_init(ia_module_init);
3287 module_exit(ia_module_exit);