xref: /kernel/linux/linux-5.10/drivers/atm/iphase.c (revision 8c2ecf20)
1/******************************************************************************
2         iphase.c: Device driver for Interphase ATM PCI adapter cards
3                    Author: Peter Wang  <pwang@iphase.com>
4		   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                   Interphase Corporation  <www.iphase.com>
6                               Version: 1.0
7*******************************************************************************
8
9      This software may be used and distributed according to the terms
10      of the GNU General Public License (GPL), incorporated herein by reference.
11      Drivers based on this skeleton fall under the GPL and must retain
12      the authorship (implicit copyright) notice.
13
14      This program is distributed in the hope that it will be useful, but
15      WITHOUT ANY WARRANTY; without even the implied warranty of
16      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17      General Public License for more details.
18
19      Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20      was originally written by Monalisa Agrawal at UNH. Now this driver
21      supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22      card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23      in terms of PHY type, the size of control memory and the size of
24      packet memory. The following are the change log and history:
25
26          Bugfix the Mona's UBR driver.
27          Modify the basic memory allocation and dma logic.
28          Port the driver to the latest kernel from 2.0.46.
29          Complete the ABR logic of the driver, and added the ABR work-
30              around for the hardware anomalies.
31          Add the CBR support.
32	  Add the flow control logic to the driver to allow rate-limit VC.
33          Add 4K VC support to the board with 512K control memory.
34          Add the support of all the variants of the Interphase ATM PCI
35          (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36          (25M UTP25) and x531 (DS3 and E3).
37          Add SMP support.
38
39      Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/sonet.h>
51#include <linux/skbuff.h>
52#include <linux/time.h>
53#include <linux/delay.h>
54#include <linux/uio.h>
55#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/wait.h>
58#include <linux/slab.h>
59#include <asm/io.h>
60#include <linux/atomic.h>
61#include <linux/uaccess.h>
62#include <asm/string.h>
63#include <asm/byteorder.h>
64#include <linux/vmalloc.h>
65#include <linux/jiffies.h>
66#include <linux/nospec.h>
67#include "iphase.h"
68#include "suni.h"
/* Swap the two bytes of a 16-bit value.  Argument fully parenthesized so
 * expression arguments (e.g. a + b, a ? b : c) expand correctly. */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
70
71#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
72
73static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
74static void desc_dbg(IADEV *iadev);
75
76static IADEV *ia_dev[8];
77static struct atm_dev *_ia_dev[8];
78static int iadev_count;
79static void ia_led_timer(struct timer_list *unused);
80static DEFINE_TIMER(ia_timer, ia_led_timer);
81static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
82static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
83static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
84            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
85
86module_param(IA_TX_BUF, int, 0);
87module_param(IA_TX_BUF_SZ, int, 0);
88module_param(IA_RX_BUF, int, 0);
89module_param(IA_RX_BUF_SZ, int, 0);
90module_param(IADebugFlag, uint, 0644);
91
92MODULE_LICENSE("GPL");
93
94/**************************** IA_LIB **********************************/
95
96static void ia_init_rtn_q (IARTN_Q *que)
97{
98   que->next = NULL;
99   que->tail = NULL;
100}
101
102static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
103{
104   data->next = NULL;
105   if (que->next == NULL)
106      que->next = que->tail = data;
107   else {
108      data->next = que->next;
109      que->next = data;
110   }
111   return;
112}
113
114static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
115   IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
116   if (!entry)
117      return -ENOMEM;
118   entry->data = data;
119   entry->next = NULL;
120   if (que->next == NULL)
121      que->next = que->tail = entry;
122   else {
123      que->tail->next = entry;
124      que->tail = que->tail->next;
125   }
126   return 1;
127}
128
129static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
130   IARTN_Q *tmpdata;
131   if (que->next == NULL)
132      return NULL;
133   tmpdata = que->next;
134   if ( que->next == que->tail)
135      que->next = que->tail = NULL;
136   else
137      que->next = que->next->next;
138   return tmpdata;
139}
140
141static void ia_hack_tcq(IADEV *dev) {
142
143  u_short 		desc1;
144  u_short		tcq_wr;
145  struct ia_vcc         *iavcc_r = NULL;
146
147  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
148  while (dev->host_tcq_wr != tcq_wr) {
149     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
150     if (!desc1) ;
151     else if (!dev->desc_tbl[desc1 -1].timestamp) {
152        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
153        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
154     }
155     else if (dev->desc_tbl[desc1 -1].timestamp) {
156        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
157           printk("IA: Fatal err in get_desc\n");
158           continue;
159        }
160        iavcc_r->vc_desc_cnt--;
161        dev->desc_tbl[desc1 -1].timestamp = 0;
162        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
163                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
164        if (iavcc_r->pcr < dev->rate_limit) {
165           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
166           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
167              printk("ia_hack_tcq: No memory available\n");
168        }
169        dev->desc_tbl[desc1 -1].iavcc = NULL;
170        dev->desc_tbl[desc1 -1].txskb = NULL;
171     }
172     dev->host_tcq_wr += 2;
173     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
174        dev->host_tcq_wr = dev->ffL.tcq_st;
175  }
176} /* ia_hack_tcq */
177
/*
 * get_desc - allocate a free transmit descriptor number from the TCQ.
 *
 * First reaps completed descriptors via ia_hack_tcq().  Then, at most once
 * every 50 jiffies (or whenever the TCQ looks empty), scans the descriptor
 * table and forcibly recovers descriptors whose per-VC timeout (ltimeout)
 * has expired, pushing them back onto the TCQ.
 *
 * Returns a 1-based descriptor number, or 0xFFFF when none is available.
 * The chosen descriptor's timestamp is set to `jiffies' to mark it in use.
 * NOTE(review): @iavcc is currently unused here - confirm against callers.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short 		desc_num, i;
  struct sk_buff        *skb;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* time of last recovery scan; shared across calls */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;   /* entry not in use */
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Back the TCQ read pointer up one slot (with wrap) and store
            * this descriptor's number there so it becomes allocatable. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* TCQ empty: no free descriptor */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip slots that are empty or whose descriptor is still in flight. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
	dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
235
/*
 * clear_lockup - detect and work around a hardware ABR transmit lockup.
 *
 * Called periodically for ABR VCs.  Every 5th invocation (vcstatus->cnt)
 * it samples the VC's segmentation state; a lockup is assumed either when
 * the VC sits in ABR_STATE across a 10us delay with a pending descriptor,
 * or when last_cell_slot/fraction have not moved since the previous probe.
 * Recovery: take the segmentation engine offline, force the VC to idle,
 * re-insert its VCI into the first free ABR schedule-table slot, then
 * bring the engine back online and re-arm the transmit interrupts.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char          	foundLockUp;
  vcstatus_t		*vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* probe only every 5th call */
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* still stuck in ABR_STATE with a descriptor pending => lockup */
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
	      /* Compare schedule progress against the previous probe; no
	       * movement in cell slot and fraction means the VC is stuck. */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	         foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);   /* take seg engine offline */
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	/* find the first free slot in the ABR schedule table */
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
	vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
295
296/*
297** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
298**
299**  +----+----+------------------+-------------------------------+
300**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
301**  +----+----+------------------+-------------------------------+
302**
303**    R = reserved (written as 0)
304**    NZ = 0 if 0 cells/sec; 1 otherwise
305**
306**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
307*/
308static u16
309cellrate_to_float(u32 cr)
310{
311
312#define	NZ 		0x4000
313#define	M_BITS		9		/* Number of bits in mantissa */
314#define	E_BITS		5		/* Number of bits in exponent */
315#define	M_MASK		0x1ff
316#define	E_MASK		0x1f
317  u16   flot;
318  u32	tmp = cr & 0x00ffffff;
319  int 	i   = 0;
320  if (cr == 0)
321     return 0;
322  while (tmp != 1) {
323     tmp >>= 1;
324     i++;
325  }
326  if (i == M_BITS)
327     flot = NZ | (i << M_BITS) | (cr & M_MASK);
328  else if (i < M_BITS)
329     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
330  else
331     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
332  return flot;
333}
334
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** NOTE: compiled out (#if 0); kept only as documentation of the inverse of
** cellrate_to_float() above.
*/
static u32
float_to_cellrate(u16 rate)
{
  u32   exp, mantissa, cps;
  if ((rate & NZ) == 0)
     return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
     return 1;
  cps = (1 << M_BITS) | mantissa;   /* restore implicit leading 1 */
  if (exp == M_BITS)
     cps = cps;
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
  else
     cps >>= (M_BITS - exp);
  return cps;
}
#endif
359
360static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
361  srv_p->class_type = ATM_ABR;
362  srv_p->pcr        = dev->LineRate;
363  srv_p->mcr        = 0;
364  srv_p->icr        = 0x055cb7;
365  srv_p->tbe        = 0xffffff;
366  srv_p->frtt       = 0x3a;
367  srv_p->rif        = 0xf;
368  srv_p->rdf        = 0xb;
369  srv_p->nrm        = 0x4;
370  srv_p->trm        = 0x7;
371  srv_p->cdf        = 0x3;
372  srv_p->adtf       = 50;
373}
374
/*
 * ia_open_abr_vc - program the per-VC ABR tables for a new connection.
 *
 * @flag == 1: FFRED (forward/segmentation) side - compute and write the
 *             Nrm/Trm/Crm/PCR/ICR/ADTF fields of the main VC table entry.
 * @flag == 0: RFRED (reassembly) side - mark the reassembly table entry
 *             and program the additive-increase rate, then account the
 *             VC's MCR in dev->sum_mcr.
 * Rate fields are converted with cellrate_to_float().  Always returns 0.
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32		icr;
  u8		trm, nrm, crm;
  u16		adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;   /* one table entry per VCI */
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;   /* never allow a zero Trm */
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is clamped by the TBE/FRTT-derived rate, avoiding a
        * divide-by-zero by picking the direction of the division. */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;   /* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr	   += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * ia_cbr_setup - reserve CBR schedule-table slots for a new CBR VC.
 *
 * Converts the VC's max PCR into a number of schedule-table entries
 * (rounded to the nearest multiple of dev->Granularity), then spreads
 * those entries as evenly as possible through the CBR schedule table,
 * probing outward from each ideal slot until a free slot is found.
 * Enables CBR in the STPARMS register when the first CBR VC appears.
 *
 * Returns 0 on success, -1 if no PCR is defined, -EBUSY if the table
 * lacks enough free entries for the requested rate.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   /* round to whichever multiple of Granularity is closer (biased 3:1
    * toward rounding up) */
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          // probe alternately below (idealSlot - inc) and above
          // (idealSlot + inc) the ideal slot, widening each iteration
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
576static void ia_cbrVc_close (struct atm_vcc *vcc) {
577   IADEV *iadev;
578   u16 *SchedTbl, NullVci = 0;
579   u32 i, NumFound;
580
581   iadev = INPH_IA_DEV(vcc->dev);
582   iadev->NumEnabledCBR--;
583   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
584   if (iadev->NumEnabledCBR == 0) {
585      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
586      IF_CBR (printk("CBR support disabled\n");)
587   }
588   NumFound = 0;
589   for (i=0; i < iadev->CbrTotEntries; i++)
590   {
591      if (*SchedTbl == vcc->vci) {
592         iadev->CbrRemEntries++;
593         *SchedTbl = NullVci;
594         IF_CBR(NumFound++;)
595      }
596      SchedTbl++;
597   }
598   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
599}
600
601static int ia_avail_descs(IADEV *iadev) {
602   int tmp = 0;
603   ia_hack_tcq(iadev);
604   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
605      tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
606   else
607      tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
608                   iadev->ffL.tcq_st) / 2;
609   return tmp;
610}
611
612static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
613
614static int ia_que_tx (IADEV *iadev) {
615   struct sk_buff *skb;
616   int num_desc;
617   struct atm_vcc *vcc;
618   num_desc = ia_avail_descs(iadev);
619
620   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
621      if (!(vcc = ATM_SKB(skb)->vcc)) {
622         dev_kfree_skb_any(skb);
623         printk("ia_que_tx: Null vcc\n");
624         break;
625      }
626      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
627         dev_kfree_skb_any(skb);
628         printk("Free the SKB on closed vci %d \n", vcc->vci);
629         break;
630      }
631      if (ia_pkt_tx (vcc, skb)) {
632         skb_queue_head(&iadev->tx_backlog, skb);
633      }
634      num_desc--;
635   }
636   return 0;
637}
638
/*
 * ia_tx_poll - complete transmitted skbs queued on tx_return_q.
 *
 * For each returned descriptor, drains the VC's txing_skb list up to and
 * including the matching skb, releasing each via vcc->pop() when set (and
 * the skb has data), otherwise dev_kfree_skb_any().  Earlier skbs found
 * without IA_TX_DONE are reported as lost.  If the matching skb is not on
 * the list, the return-queue entry is pushed back and processing stops.
 * Finishes by kicking the backlog with ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       /* Release every in-flight skb up to the one this descriptor
        * completed; anything earlier without IA_TX_DONE was lost. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* skb wasn't on the txing list: requeue the entry and retry later */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/*
 * ia_eeprom_put - bit-bang a 16-bit value into the NOVRAM at @addr.
 * NOTE: compiled out (#if 0); kept as documentation of the write protocol
 * (write-enable, IAWRITE command, clock out 16 bits MSB first, poll NVDO
 * for completion, then write-disable).
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
        u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
736
/*
 * ia_eeprom_get - bit-bang a 16-bit word out of the NOVRAM at @addr.
 * Issues the IAREAD command, then clocks in 16 bits MSB first.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
760
/*
 * ia_hw_type - probe board variant from EEPROM word 25 and size buffers.
 *
 * Scales the default TX/RX buffer counts down by 2 (512K boards) or 8
 * (smaller boards) unless the user overrode them via module parameters,
 * then derives rx_pkt_ram, the PHY type, and the line rate in cells/sec
 * for that PHY (25Mbit, DS3, E3, or OC3).
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      /* 1M boards take the full defaults */
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* halve the buffer counts only if the user kept the defaults */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* smallest memory option: default counts divided by 8 */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   /* line rates below are payload cell rates: bits/8 per cell slot,
    * scaled by 26/27 framing overhead, over 53-byte cells */
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
821
822static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
823{
824	return readl(ia->phy + (reg >> 2));
825}
826
827static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
828{
829	writel(val, ia->phy + (reg >> 2));
830}
831
832static void ia_frontend_intr(struct iadev_priv *iadev)
833{
834	u32 status;
835
836	if (iadev->phy_type & FE_25MBIT_PHY) {
837		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
838		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
839	} else if (iadev->phy_type & FE_DS3_PHY) {
840		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
841		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
842		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
843	} else if (iadev->phy_type & FE_E3_PHY) {
844		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
845		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
846		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
847	} else {
848		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
849		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
850	}
851
852	printk(KERN_INFO "IA: SUNI carrier %s\n",
853		iadev->carrier_detect ? "detected" : "lost signal");
854}
855
856static void ia_mb25_init(struct iadev_priv *iadev)
857{
858#if 0
859   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
860#endif
861	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
862	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
863
864	iadev->carrier_detect =
865		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
866}
867
/* One PHY initialization step: write @val to register byte-offset @reg
 * (consumed by ia_phy_write()). */
struct ia_reg {
	u16 reg;	/* PHY register byte offset */
	u16 val;	/* value to write */
};
872
873static void ia_phy_write(struct iadev_priv *iadev,
874			 const struct ia_reg *regs, int len)
875{
876	while (len--) {
877		ia_phy_write32(iadev, regs->reg, regs->val);
878		regs++;
879	}
880}
881
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific init of the PM7345 S/UNI-PDH:
 * sample carrier from the framer status, then load the DS3 framer,
 * transmitter, and splitter configuration table.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* carrier present iff loss-of-signal (LOSV) is clear */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
899
/*
 * ia_suni_pm7345_init_e3 - E3-specific init of the PM7345 S/UNI-PDH:
 * sample carrier from the framer interrupt/status register, then load
 * the E3 framer/transmitter/splitter configuration table (E3 mode
 * enabled via SUNI_PM7345_E3ENBL).
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* carrier present iff loss-of-signal (LOS) is clear */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
919
920static void ia_suni_pm7345_init(struct iadev_priv *iadev)
921{
922	static const struct ia_reg suni_init[] = {
923		/* Enable RSOP loss of signal interrupt. */
924		{ SUNI_INTR_ENBL,		0x28 },
925		/* Clear error counters. */
926		{ SUNI_ID_RESET,		0 },
927		/* Clear "PMCTST" in master test register. */
928		{ SUNI_MASTER_TEST,		0 },
929
930		{ SUNI_RXCP_CTRL,		0x2c },
931		{ SUNI_RXCP_FCTRL,		0x81 },
932
933		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
934		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
935		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
936		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },
937
938		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
939		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
940		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
941		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },
942
943		{ SUNI_RXCP_CELL_PAT_H1,	0 },
944		{ SUNI_RXCP_CELL_PAT_H2,	0 },
945		{ SUNI_RXCP_CELL_PAT_H3,	0 },
946		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },
947
948		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
949		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
950		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
951		{ SUNI_RXCP_CELL_MASK_H4,	0xff },
952
953		{ SUNI_TXCP_CTRL,		0xa4 },
954		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
955		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
956	};
957
958	if (iadev->phy_type & FE_DS3_PHY)
959		ia_suni_pm7345_init_ds3(iadev);
960	else
961		ia_suni_pm7345_init_e3(iadev);
962
963	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
964
965	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
966		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
967		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
968#ifdef __SNMP__
969   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
970#endif /* __SNMP__ */
971   return;
972}
973
974
975/***************************** IA_LIB END *****************************/
976
#ifdef CONFIG_ATM_IA_DEBUG
/* NOTE(review): tcnter is not referenced in this part of the file;
   presumably a debug trip counter used elsewhere -- confirm before removal. */
static int tcnter = 0;
979static void xdump( u_char*  cp, int  length, char*  prefix )
980{
981    int col, count;
982    u_char prntBuf[120];
983    u_char*  pBuf = prntBuf;
984    count = 0;
985    while(count < length){
986        pBuf += sprintf( pBuf, "%s", prefix );
987        for(col = 0;count + col < length && col < 16; col++){
988            if (col != 0 && (col % 4) == 0)
989                pBuf += sprintf( pBuf, " " );
990            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
991        }
992        while(col++ < 16){      /* pad end of buffer with blanks */
993            if ((col % 4) == 0)
994                sprintf( pBuf, " " );
995            pBuf += sprintf( pBuf, "   " );
996        }
997        pBuf += sprintf( pBuf, "  " );
998        for(col = 0;count + col < length && col < 16; col++){
999            if (isprint((int)cp[count + col]))
1000                pBuf += sprintf( pBuf, "%c", cp[count + col] );
1001            else
1002                pBuf += sprintf( pBuf, "." );
1003                }
1004        printk("%s\n", prntBuf);
1005        count += col;
1006        pBuf = prntBuf;
1007    }
1008
1009}  /* close xdump(... */
1010#endif /* CONFIG_ATM_IA_DEBUG */
1011
1012
/* NOTE(review): ia_boards is not referenced in this part of the file;
   presumably the head of the list of probed adapters -- confirm usage. */
static struct atm_dev *ia_boards = NULL;

/*
 * The *_BASE constants are defined for a 128KB memory part; these macros
 * scale them by the size of memory actually installed (iadev->mem).
 * They expand an unhygienic reference to a local variable named `iadev'.
 */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1021
1022
1023/*-- some utilities and memory allocation stuff will come here -------------*/
1024
/*
 * Debug dump of the transmit complete queue (TCQ): prints the hardware
 * write pointer and the descriptors around it, the driver's local
 * read/write pointers, every TCQ slot between the queue start and end
 * addresses, and the timestamp of each entry in desc_tbl.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* Walk the whole TCQ region, two bytes (one descriptor slot) at a time. */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1049
1050
1051/*----------------------------- Receiving side stuff --------------------------*/
1052
/*
 * Would drain the reassembler's exception queue (VCI + error code per
 * entry), but the whole body is compiled out: per the comment below,
 * processing the queue caused too many exception interrupts, so this is
 * intentionally a no-op.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1081
/*
 * Return receive descriptor @desc to the free descriptor queue (FDQ) in
 * reassembly RAM and advance the hardware free-queue write pointer,
 * wrapping at the end of the queue.
 */
static void free_desc(struct atm_dev *dev, int desc)
{
	IADEV *iadev;
	iadev = INPH_IA_DEV(dev);
        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
	iadev->rfL.fdq_wr +=2;	/* queue entries are 16 bits wide */
	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;	/* wrap around */
	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}
1092
1093
/*
 * Pull one completed PDU off the reassembler's packet complete queue
 * (PCQ), validate its buffer descriptor, allocate an skb and queue a DMA
 * transfer of the receive buffer into it (completion handled later in
 * rx_dle_intr()).
 *
 * Returns 0 on success or after a dropped-and-accounted PDU, -EINVAL when
 * the PCQ is empty, -1 on a bad descriptor or missing open VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* Queue empty: our read pointer caught up with the hw write pointer. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
   	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer  - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* Sanity-check descriptor number and VC index before using them. */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* PDU length = how far the chip's DMA pointer advanced from the
	   buffer start -- TODO confirm against the (i)Chip documentation. */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	   goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	/* NOTE(review): presumably a settling delay before kicking the DMA
	   engine -- confirm necessity. */
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1208
/*
 * Reassembler interrupt handler: dispatches on the receive interrupt
 * status bits -- packet received (drain the PCQ via rx_pkt()), free
 * descriptor queue empty (stall detection/recovery), exception received,
 * and raw cell received (currently ignored).
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* Free queue ran dry.  First time: snapshot the packet counter and
	timestamp.  If 50 jiffies later no packets were received, assume a
	stall, force-free every descriptor and unmask the interrupts. */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}
1269
1270
/*
 * Receive DMA-list completion handler: walks the RX DLE ring from our
 * last-read entry up to the hardware's current position, returning each
 * descriptor to the free queue, validating the AAL5 trailer length and
 * pushing good skbs up to the VCC.  Finally, if receive interrupts were
 * masked because the free queue ran dry, unmask them once descriptors
 * are available again.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): unmaps using rx_dle_q.write->sys_pkt_addr rather
	     than the address of the entry just completed -- confirm this is
	     intentional and matches how rx_pkt() fills the ring. */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
	  /* Reject PDUs whose AAL5 trailer length is impossible. */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1372
1373
/*
 * Per-VCC receive-side open: rejects ABR on the 25 Mbit PHY, marks the
 * VCI valid in the reassembler's VC lookup table, programs ABR
 * parameters (or marks the reassembly-table entry NO_AAL5_PKT for
 * non-ABR traffic), and records the vcc in rx_open[] for demux in
 * rx_pkt().
 *
 * Returns 0 on success, -EINVAL when ABR is requested on a 25 Mbit PHY.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
        }
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

	/* NOTE(review): direct store through an __iomem pointer (no writew)
	   -- sparse would flag this; confirm it is safe on this bus. */
        *vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
       	else {  /* for UBR  later may need to add CBR logic */
        	reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
           	reass_ptr += vcc->vci;
           	*reass_ptr = NO_AAL5_PKT;
       	}

	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1417
/*
 * One-time initialization of the receive (reassembly) side: allocates
 * the RX DLE ring, resets the reassembler, lays out its control memory
 * (buffer descriptor table, free/packet-complete/exception queues,
 * reassembly, VC and ABR-VC lookup tables), programs the timeout
 * registers, clears and unmasks interrupts, and brings the reassembler
 * online.
 *
 * Returns 0 on success, -ENOMEM when the DLE ring or the rx_open array
 * cannot be allocated.
 */
static int rx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	void *dle_addr;
	struct abr_vc_table  *abr_vc_table;
	u16 *vc_table;
	u16 *reass_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
  //    spin_lock_init(&iadev->rx_lock);

	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->rx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->rx_dle_q.start = (struct dle *)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	/* We know this is 32bit bus addressed so the following is safe */
	writel(iadev->rx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

	/* Mask all interrupts, take the reassembler offline and reset it. */
	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------

		Buffer descr	0x0000 (736 - 23K)
		VP Table	0x5c00 (256 - 512)
		Except q	0x5e00 (128 - 512)
		Free buffer q	0x6000 (1K - 2K)
		Packet comp q	0x6800 (1K - 2K)
		Reass Table	0x7000 (1K - 2K)
		VC Table	0x7800 (1K - 2K)
		ABR VC Table	0x8000 (1K - 32K)
	*/

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;	/* descriptor 0 is never handed out */
	rx_pkt_start = iadev->rx_pkt_ram;
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		rx_pkt_start += iadev->rx_buf_sz;
	}
	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
	/* Free descriptor queue: program start/end and both pointers, then
	   fill it with every descriptor number (all free initially). */
        i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                         iadev->reass_reg+FREEQ_ED_ADR);
        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		*freeq_start = (u_short)i;
		freeq_start++;
	}
	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
        /* Packet Complete Queue */
        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+PCQ_ST_ADR);
        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
        writew(i, iadev->reass_reg+PCQ_RD_PTR);
        writew(i, iadev->reass_reg+PCQ_WR_PTR);

        /* Exception Queue */
        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

    	/* Load local copy of FREEQ and PCQ ptrs */
        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
       	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
              iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* VP Table */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
		- I guess we can write all 1s or 0x000f in the entire memory
		  space or something similar.
	*/

	/* This seems to work and looks right to me too !!! */
        i =  REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
 	/* initialize Reassembly table to I don't know what ???? */
	reass_table = (u16 *)(iadev->reass_ram+i);
        j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;
       /* vcsize_sel = log2(8K / num_vc): encodes the configured VC count
	  into the low bits of the VC lookup base register. */
       i = 8*1024;
       vcsize_sel =  0;
       while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
       }
       i = RX_VC_TABLE * iadev->memSize;
       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
	{
		/* shift the reassembly pointer by 3 + lower 3 bits of
		vc_lkup_base register (=3 for 1K VCs) and the last byte
		is those low 3 bits.
		Shall program this later.
		*/
		*vc_table = (i << 6) | 15;	/* for invalid VCI */
		vc_table++;
	}
        /* ABR VC table */
        i =  ABR_VC_TABLE * iadev->memSize;
        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

        i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
	/* NOTE(review): the ABR VC table is sized with REASS_TABLE_SZ here,
	   not an ABR-specific constant -- confirm this is intentional. */
        j = REASS_TABLE_SZ * iadev->memSize;
        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    	for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
             	abr_vc_table->air = 0x5eb1;
	       	abr_vc_table++;
        }

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count  related Registers :
	   Set packet timeout to occur in about 3 seconds
	   Set Packet Aging Interval count register to overflow in about 4 us
 	*/
        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

	/* j still holds the table-entry count from the loop above. */
        i = (j >> 6) & 0xFF;
        j += 2 * (j - 1);
        i |= ((j << 2) & 0xFF00);
        writew(i, iadev->reass_reg+TMOUT_RANGE);

        /* initiate the desc_tble */
        for(i=0; i<iadev->num_tx_desc;i++)
            iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;

	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
	if (!iadev->rx_open) {
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
		dev->number);
		goto err_free_dle;
	}

        iadev->rxing = 1;
        iadev->rx_pkt_cnt = 0;
	/* Mode Register */
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	return 0;

err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
err_out:
	return -ENOMEM;
}
1641
1642
1643/*
1644	The memory map suggested in appendix A and the coding for it.
1645	Keeping it around just in case we change our mind later.
1646
1647		Buffer descr	0x0000 (128 - 4K)
1648		UBR sched	0x1000 (1K - 4K)
1649		UBR Wait q	0x2000 (1K - 4K)
		Common queues	0x3000 Packet Ready, Transmit comp(0x3100)
1651					(128 - 256) each
1652		extended VC	0x4000 (1K - 8K)
1653		ABR sched	0x6000	and ABR wait queue (1K - 2K) each
1654		CBR sched	0x7000 (as needed)
1655		VC table	0x8000 (1K - 32K)
1656*/
1657
/*
 * Segmentation (transmit) interrupt handler: on TRANSMIT_DONE, reap
 * finished transmissions via ia_tx_poll() under the tx lock, acknowledge
 * the interrupt, and wake anyone waiting in close for the last packet to
 * drain.  TCQ_NOT_EMPTY is only logged.
 */
static void tx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned short status;
        unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (status & TRANSMIT_DONE){

           IF_EVENT(printk("Transmit Done Intr logic run\n");)
           spin_lock_irqsave(&iadev->tx_lock, flags);
           ia_tx_poll(iadev);
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   /* Writing the bit back acknowledges the interrupt. */
           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
           if (iadev->close_pending)
               wake_up(&iadev->close_wait);
        }
	if (status & TCQ_NOT_EMPTY)
	{
	    IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
	}
}
1682
/*
 * Transmit DMA-list completion handler: walks the TX DLE ring from our
 * last-read entry up to the hardware's current position, unmapping each
 * transmitted buffer and either completing the skb (vcc->pop or free)
 * or, for rate-limited VCs (pcr < rate_limit), marking it IA_DLED and
 * parking it on the per-VC txing_skb queue for the flow-control logic to
 * complete later.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    /* NOTE(review): (dle - start) is a pointer difference in DLE
	       units, yet it is taken modulo 2*sizeof(struct dle); looks
	       like plain "% 2" was intended -- confirm against
	       ia_pkt_tx()'s DLE layout before changing. */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
				 DMA_TO_DEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1745
/*
 * Set up the transmit side of a VCC: validate the requested QoS against
 * the PHY type and configured buffer size, derive the effective PCR,
 * initialise the on-card main and extended VC table entries, and program
 * the class-specific (UBR / ABR / CBR) scheduling state.
 *
 * Returns 0 on success or a negative errno (-EINVAL / -EBUSY), and -1
 * for an out-of-range CBR PCR (historic return convention of this file).
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
        int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	/* the 25 Mbit front end only does UBR */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
               printk("IA:  CBR not support\n");
               return -EINVAL;
          }
        }
        ia_vcc =  INPH_IA_VCC(vcc);
        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
	/* an SDU plus its CPCS trailer must fit into one tx buffer */
        if (vcc->qos.txtp.max_sdu >
                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
           printk("IA:  SDU size over (%d) the configured SDU size %d\n",
		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
	   vcc->dev_data = NULL;
           kfree(ia_vcc);
           return -EINVAL;
        }
	ia_vcc->vc_desc_cnt = 0;
        ia_vcc->txing = 1;

        /* find pcr */
        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
	/* never allow a PCR above the physical line rate */
        if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
        ia_vcc->pcr = vcc->qos.txtp.pcr;

	/* pick a flow-control timeout that shrinks as the rate grows */
        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
	/* NOTE(review): same condition as the block just below; the two
	 * rate_limit tests could be merged. */
        if (ia_vcc->pcr < iadev->rate_limit)
           skb_queue_head_init (&ia_vcc->txing_skb);
        if (ia_vcc->pcr < iadev->rate_limit) {
	   struct sock *sk = sk_atm(vcc);

	   /* bound the socket send buffer for rate-limited VCs so the
	      backlog cannot grow without limit */
	   if (vcc->qos.txtp.max_sdu != 0) {
               if (ia_vcc->pcr > 60000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
               else if (ia_vcc->pcr > 2000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
               else
                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
           }
           else
             sk->sk_sndbuf = 24576;
        }

	/* clear this VCI's entries in the on-card main and extended VC tables */
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));

	/* store the most significant 4 bits of vci as the last 4 bits
		of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
		part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		vc->type = UBR;
                vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
                if (vcc->qos.txtp.pcr > 0)
                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                                             vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{       srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
		/* start from driver defaults, then fold in every ABR
		   parameter the caller actually supplied */
                init_abr_vc(iadev, &srv_p);
                if (vcc->qos.txtp.pcr > 0)
                   srv_p.pcr = vcc->qos.txtp.pcr;
                if (vcc->qos.txtp.min_pcr > 0) {
                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
                   if (tmpsum > iadev->LineRate)
                       return -EBUSY;
                   srv_p.mcr = vcc->qos.txtp.min_pcr;
                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
                }
                else srv_p.mcr = 0;
                if (vcc->qos.txtp.icr)
                   srv_p.icr = vcc->qos.txtp.icr;
                if (vcc->qos.txtp.tbe)
                   srv_p.tbe = vcc->qos.txtp.tbe;
                if (vcc->qos.txtp.frtt)
                   srv_p.frtt = vcc->qos.txtp.frtt;
                if (vcc->qos.txtp.rif)
                   srv_p.rif = vcc->qos.txtp.rif;
                if (vcc->qos.txtp.rdf)
                   srv_p.rdf = vcc->qos.txtp.rdf;
                if (vcc->qos.txtp.nrm_pres)
                   srv_p.nrm = vcc->qos.txtp.nrm;
                if (vcc->qos.txtp.trm_pres)
                   srv_p.trm = vcc->qos.txtp.trm;
                if (vcc->qos.txtp.adtf_pres)
                   srv_p.adtf = vcc->qos.txtp.adtf;
                if (vcc->qos.txtp.cdf_pres)
                   srv_p.cdf = vcc->qos.txtp.cdf;
		/* initial cell rate can never exceed the peak */
                if (srv_p.icr > srv_p.pcr)
                   srv_p.icr = srv_p.pcr;
                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
                                                      srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
                if (iadev->phy_type & FE_25MBIT_PHY) {
                    printk("IA:  CBR not support\n");
                    return -EINVAL;
                }
                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
                   IF_CBR(printk("PCR is not available\n");)
                   return -1;
                }
                vc->type = CBR;
                vc->status = CRC_APPEND;
		/* reserve slots in the CBR schedule table */
                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
                    return ret;
                }
	} else {
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}

        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
	return 0;
}
1897
1898
/*
 * One-time initialisation of the transmit (segmentation) side.
 * Allocates the coherent DLE ring, carves up the on-card segmentation
 * control memory (buffer descriptors, transmit-complete and packet-ready
 * queues, CBR/UBR/ABR schedule tables, main/extended VC tables),
 * allocates host-side bookkeeping (tx_buf, desc_tbl, testTable) and
 * programs the segmentation registers.
 *
 * Returns 0 on success; on allocation failure unwinds everything done
 * so far and returns -ENOMEM.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
        u_short tmp16;
        u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
        spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                                readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	/* the DLE queue is a ring: read == write means empty */
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr 	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table 	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	/* descriptor 0 stays all-zero; real descriptors start at index 1 */
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* host-side shadow array: one CPCS trailer buffer per descriptor */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
        }
       	for (i= 0; i< iadev->num_tx_desc; i++)
       	{
	    struct cpcs_trailer *cpcs;

       	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
		goto err_free_tx_bufs;
            }
	    iadev->tx_buf[i].cpcs = cpcs;
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
        }
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
        i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
        i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
 	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)

        /* Initialize the CBR Schedualing Table */
        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
                                                          0, iadev->num_vc*6);
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/

	/* encode num_vc (a power of two <= 8K) as the VC-size selector */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }

        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
 	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

        i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* per-VC host bookkeeping used by the CBR/UBR rate logic */
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
        }
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
              	iadev->testTable[i]->lastTime = 0;
 		iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
        if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
	/* Set Idle Header Reigisters to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

        iadev->close_pending = 0;
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
        skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;

	return 0;

	/* error unwinding: release in reverse order of allocation */
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2221
2222static irqreturn_t ia_int(int irq, void *dev_id)
2223{
2224   struct atm_dev *dev;
2225   IADEV *iadev;
2226   unsigned int status;
2227   int handled = 0;
2228
2229   dev = dev_id;
2230   iadev = INPH_IA_DEV(dev);
2231   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2232   {
2233	handled = 1;
2234        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2235	if (status & STAT_REASSINT)
2236	{
2237	   /* do something */
2238	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2239	   rx_intr(dev);
2240	}
2241	if (status & STAT_DLERINT)
2242	{
2243	   /* Clear this bit by writing a 1 to it. */
2244	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2245	   rx_dle_intr(dev);
2246	}
2247	if (status & STAT_SEGINT)
2248	{
2249	   /* do something */
2250           IF_EVENT(printk("IA: tx_intr \n");)
2251	   tx_intr(dev);
2252	}
2253	if (status & STAT_DLETINT)
2254	{
2255	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2256	   tx_dle_intr(dev);
2257	}
2258	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2259	{
2260           if (status & STAT_FEINT)
2261               ia_frontend_intr(iadev);
2262	}
2263   }
2264   return IRQ_RETVAL(handled);
2265}
2266
2267
2268
2269/*----------------------------- entries --------------------------------*/
2270static int get_esi(struct atm_dev *dev)
2271{
2272	IADEV *iadev;
2273	int i;
2274	u32 mac1;
2275	u16 mac2;
2276
2277	iadev = INPH_IA_DEV(dev);
2278	mac1 = cpu_to_be32(le32_to_cpu(readl(
2279				iadev->reg+IPHASE5575_MAC1)));
2280	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2281	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2282	for (i=0; i<MAC1_LEN; i++)
2283		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2284
2285	for (i=0; i<MAC2_LEN; i++)
2286		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2287	return 0;
2288}
2289
2290static int reset_sar(struct atm_dev *dev)
2291{
2292	IADEV *iadev;
2293	int i, error;
2294	unsigned int pci[64];
2295
2296	iadev = INPH_IA_DEV(dev);
2297	for (i = 0; i < 64; i++) {
2298		error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
2299		if (error != PCIBIOS_SUCCESSFUL)
2300			return error;
2301	}
2302	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2303	for (i = 0; i < 64; i++) {
2304		error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
2305		if (error != PCIBIOS_SUCCESSFUL)
2306			return error;
2307	}
2308	udelay(5);
2309	return 0;
2310}
2311
2312
/*
 * Probe-time initialisation: read PCI configuration, size the board from
 * its BAR length (1 MB => 4K VCs, 256 KB => 1K VCs), ioremap the register
 * and RAM windows, fill in the iadev address map, read the ESI and reset
 * the SAR.  Returns 0 on success, a negative errno on failure, or 1 if
 * the SAR reset fails (historic convention).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR size determines the board variant: VC count and memSize scale */
        if (iadev->pci_map_size == 0x100000){
          iadev->num_vc = 4096;
	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
          iadev->memSize = 4;
        }
        else if (iadev->pci_map_size == 0x40000) {
          iadev->num_vc = 1024;
          iadev->memSize = 1;
        }
        else {
           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
           return -EINVAL;
        }
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
          iadev->reg,iadev->seg_reg,iadev->reass_reg,
          iadev->phy, iadev->ram, iadev->seg_ram,
          iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  /* undo the ioremap before bailing out */
	  iounmap(iadev->base);
	  return error;
	}
        printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
                printk("%s%02X",i ? "-" : "",dev->esi[i]);
        printk("\n");

        /* reset SAR */
        if (reset_sar(dev)) {
	   iounmap(iadev->base);
           printk("IA: reset SAR fail, please try again\n");
           return 1;
        }
	return 0;
}
2425
2426static void ia_update_stats(IADEV *iadev) {
2427    if (!iadev->carrier_detect)
2428        return;
2429    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2430    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2431    iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2432    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2433    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2434    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2435    return;
2436}
2437
2438static void ia_led_timer(struct timer_list *unused) {
2439 	unsigned long flags;
2440  	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2441        u_char i;
2442        static u32 ctrl_reg;
2443        for (i = 0; i < iadev_count; i++) {
2444           if (ia_dev[i]) {
2445	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2446	      if (blinking[i] == 0) {
2447		 blinking[i]++;
2448                 ctrl_reg &= (~CTRL_LED);
2449                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2450                 ia_update_stats(ia_dev[i]);
2451              }
2452              else {
2453		 blinking[i] = 0;
2454		 ctrl_reg |= CTRL_LED;
2455                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2456                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2457                 if (ia_dev[i]->close_pending)
2458                    wake_up(&ia_dev[i]->close_wait);
2459                 ia_tx_poll(ia_dev[i]);
2460                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2461              }
2462           }
2463        }
2464	mod_timer(&ia_timer, jiffies + HZ / 4);
2465 	return;
2466}
2467
2468static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2469	unsigned long addr)
2470{
2471	writel(value, INPH_IA_DEV(dev)->phy+addr);
2472}
2473
2474static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2475{
2476	return readl(INPH_IA_DEV(dev)->phy+addr);
2477}
2478
2479static void ia_free_tx(IADEV *iadev)
2480{
2481	int i;
2482
2483	kfree(iadev->desc_tbl);
2484	for (i = 0; i < iadev->num_vc; i++)
2485		kfree(iadev->testTable[i]);
2486	kfree(iadev->testTable);
2487	for (i = 0; i < iadev->num_tx_desc; i++) {
2488		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2489
2490		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2491				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2492		kfree(desc->cpcs);
2493	}
2494	kfree(iadev->tx_buf);
2495	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2496			  iadev->tx_dle_dma);
2497}
2498
2499static void ia_free_rx(IADEV *iadev)
2500{
2501	kfree(iadev->rx_open);
2502	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2503			  iadev->rx_dle_dma);
2504}
2505
/*
 * Bring the adapter fully online: claim the IRQ, enable PCI memory and
 * bus mastering, program the bus control register, run tx_init()/rx_init(),
 * release the front-end reset and initialise the PHY matching the board's
 * phy_type.  Returns 0 on success, negative errno after unwinding any
 * partially completed step.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
                    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
        }
        /* @@@ should release IRQ on error */
	/* enabling memory + master */
        if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
                    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
        }
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* keep the LED and front-end-reset bits, set burst sizes and
	   unmask every interrupt source */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

        ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* take the front end out of reset */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
       	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
        phy = 0; /* resolve compiler complaint */
        IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY bring-up depends on the front-end variant */
	if (iadev->phy_type &  FE_25MBIT_PHY)
           ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
           ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2607
/*
 * ia_close - tear down an open VC on the adapter.
 *
 * Transmit side: waits briefly for in-flight traffic, purges this VC's
 * packets from the device Tx backlog (preserving the order of other VCs'
 * packets), waits (bounded) for outstanding Tx descriptors to complete,
 * then releases the per-class rate accounting (ABR/CBR).
 * Receive side: clears the on-board reassembly and VC table entries and
 * drains any completed Rx DLEs.  Finally frees the per-VC ia_vcc state.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
        u16 *vc_table;
        IADEV *iadev;
        struct ia_vcc *ia_vcc;
        struct sk_buff *skb = NULL;
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
        unsigned long closetime, flags;

        iadev = INPH_IA_DEV(vcc->dev);
        ia_vcc = INPH_IA_VCC(vcc);
	/* Nothing to do if the VC never got its driver-private state. */
	if (!ia_vcc) return;

        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
	/* Stop new sends from being accepted (checked in ia_send()). */
	clear_bit(ATM_VF_READY,&vcc->flags);
        skb_queue_head_init (&tmp_tx_backlog);
        skb_queue_head_init (&tmp_vcc_backlog);
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
           iadev->close_pending++;
	   /* Uninterruptible 500ms grace period for in-flight transmits. */
	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
	   schedule_timeout(msecs_to_jiffies(500));
	   finish_wait(&iadev->timeout_wait, &wait);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           /* Pull every backlogged skb; free ours, keep other VCs' aside. */
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
              if (ATM_SKB(skb)->vcc == vcc){
                 if (vcc->pop) vcc->pop(vcc, skb);
                 else dev_kfree_skb_any(skb);
              }
              else
                 skb_queue_tail(&tmp_tx_backlog, skb);
           }
           /* Restore the surviving skbs to the device backlog, in order. */
           while((skb = skb_dequeue(&tmp_tx_backlog)))
             skb_queue_tail(&iadev->tx_backlog, skb);
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
           /* Timeout (jiffies) scaled inversely to the VC's cell rate.
            * NOTE(review): assumes ia_vcc->pcr != 0 here — confirm open_tx
            * always sets a non-zero pcr, else this divides by zero. */
           closetime = 300000 / ia_vcc->pcr;
           if (closetime == 0)
              closetime = 1;
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           /* Wait for all of this VC's Tx descriptors to complete. */
           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           iadev->close_pending--;
           /* Reset the per-VCI shaping bookkeeping back to UBR defaults. */
           iadev->testTable[vcc->vci]->lastTime = 0;
           iadev->testTable[vcc->vci]->fract = 0;
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
              if (vcc->qos.txtp.min_pcr > 0)
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
           }
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
              ia_vcc = INPH_IA_VCC(vcc);
              /* Return the CBR schedule-table entries to the free pool. */
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
              ia_cbrVc_close (vcc);
           }
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
        }

        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
           // reset reass table
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = NO_AAL5_PKT;
           // reset vc table
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = (vcc->vci << 6) | 15;
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
              struct abr_vc_table __iomem *abr_vc_table =
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
              abr_vc_table +=  vcc->vci;
              /* Restore ABR rate decrease/increase factors to defaults.
               * NOTE(review): magic values 0x0003/0x5eb1 presumably match
               * the reset defaults used at init time — confirm there. */
              abr_vc_table->rdf = 0x0003;
              abr_vc_table->air = 0x5eb1;
           }
           // Drain the packets
           rx_dle_intr(vcc->dev);
           iadev->rx_open[vcc->vci] = NULL;
        }
	kfree(INPH_IA_VCC(vcc));
        ia_vcc = NULL;
        vcc->dev_data = NULL;
        clear_bit(ATM_VF_ADDR,&vcc->flags);
        return;
}
2692
2693static int ia_open(struct atm_vcc *vcc)
2694{
2695	struct ia_vcc *ia_vcc;
2696	int error;
2697	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2698	{
2699		IF_EVENT(printk("ia: not partially allocated resources\n");)
2700		vcc->dev_data = NULL;
2701	}
2702	if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2703	{
2704		IF_EVENT(printk("iphase open: unspec part\n");)
2705		set_bit(ATM_VF_ADDR,&vcc->flags);
2706	}
2707	if (vcc->qos.aal != ATM_AAL5)
2708		return -EINVAL;
2709	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2710                                 vcc->dev->number, vcc->vpi, vcc->vci);)
2711
2712	/* Device dependent initialization */
2713	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2714	if (!ia_vcc) return -ENOMEM;
2715	vcc->dev_data = ia_vcc;
2716
2717	if ((error = open_rx(vcc)))
2718	{
2719		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2720		ia_close(vcc);
2721		return error;
2722	}
2723
2724	if ((error = open_tx(vcc)))
2725	{
2726		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2727		ia_close(vcc);
2728		return error;
2729	}
2730
2731	set_bit(ATM_VF_READY,&vcc->flags);
2732
2733#if 0
2734        {
2735           static u8 first = 1;
2736           if (first) {
2737              ia_timer.expires = jiffies + 3*HZ;
2738              add_timer(&ia_timer);
2739              first = 0;
2740           }
2741        }
2742#endif
2743	IF_EVENT(printk("ia open returning\n");)
2744	return 0;
2745}
2746
/*
 * ia_change_qos - atmdev_ops change_qos hook.
 *
 * QoS renegotiation is not implemented by this driver; the request is
 * logged (when event debugging is enabled) and reported as success.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2752
2753static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2754{
2755   IA_CMDBUF ia_cmds;
2756   IADEV *iadev;
2757   int i, board;
2758   u16 __user *tmps;
2759   IF_EVENT(printk(">ia_ioctl\n");)
2760   if (cmd != IA_CMD) {
2761      if (!dev->phy->ioctl) return -EINVAL;
2762      return dev->phy->ioctl(dev,cmd,arg);
2763   }
2764   if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2765   board = ia_cmds.status;
2766
2767	if ((board < 0) || (board > iadev_count))
2768		board = 0;
2769	board = array_index_nospec(board, iadev_count + 1);
2770
2771   iadev = ia_dev[board];
2772   switch (ia_cmds.cmd) {
2773   case MEMDUMP:
2774   {
2775	switch (ia_cmds.sub_cmd) {
2776          case MEMDUMP_SEGREG:
2777	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2778             tmps = (u16 __user *)ia_cmds.buf;
2779             for(i=0; i<0x80; i+=2, tmps++)
2780                if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2781             ia_cmds.status = 0;
2782             ia_cmds.len = 0x80;
2783             break;
2784          case MEMDUMP_REASSREG:
2785	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2786             tmps = (u16 __user *)ia_cmds.buf;
2787             for(i=0; i<0x80; i+=2, tmps++)
2788                if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2789             ia_cmds.status = 0;
2790             ia_cmds.len = 0x80;
2791             break;
2792          case MEMDUMP_FFL:
2793          {
2794             ia_regs_t       *regs_local;
2795             ffredn_t        *ffL;
2796             rfredn_t        *rfL;
2797
2798	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2799	     regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2800	     if (!regs_local) return -ENOMEM;
2801	     ffL = &regs_local->ffredn;
2802	     rfL = &regs_local->rfredn;
2803             /* Copy real rfred registers into the local copy */
2804 	     for (i=0; i<(sizeof (rfredn_t))/4; i++)
2805                ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2806             	/* Copy real ffred registers into the local copy */
2807	     for (i=0; i<(sizeof (ffredn_t))/4; i++)
2808                ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2809
2810             if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2811                kfree(regs_local);
2812                return -EFAULT;
2813             }
2814             kfree(regs_local);
2815             printk("Board %d registers dumped\n", board);
2816             ia_cmds.status = 0;
2817	 }
2818    	     break;
2819         case READ_REG:
2820         {
2821	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2822             desc_dbg(iadev);
2823             ia_cmds.status = 0;
2824         }
2825             break;
2826         case 0x6:
2827         {
2828             ia_cmds.status = 0;
2829             printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2830             printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2831         }
2832             break;
2833         case 0x8:
2834         {
2835             struct k_sonet_stats *stats;
2836             stats = &PRIV(_ia_dev[board])->sonet_stats;
2837             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2838             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2839             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2840             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2841             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2842             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2843             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2844             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2845             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2846         }
2847            ia_cmds.status = 0;
2848            break;
2849         case 0x9:
2850	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2851            for (i = 1; i <= iadev->num_rx_desc; i++)
2852               free_desc(_ia_dev[board], i);
2853            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2854                                            iadev->reass_reg+REASS_MASK_REG);
2855            iadev->rxing = 1;
2856
2857            ia_cmds.status = 0;
2858            break;
2859
2860         case 0xb:
2861	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2862            ia_frontend_intr(iadev);
2863            break;
2864         case 0xa:
2865	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2866         {
2867             ia_cmds.status = 0;
2868             IADebugFlag = ia_cmds.maddr;
2869             printk("New debug option loaded\n");
2870         }
2871             break;
2872         default:
2873             ia_cmds.status = 0;
2874             break;
2875      }
2876   }
2877      break;
2878   default:
2879      break;
2880
2881   }
2882   return 0;
2883}
2884
/*
 * ia_pkt_tx - queue one skb to the hardware segmentation engine.
 *
 * Must be called with iadev->tx_lock held (see ia_send()).  Obtains a free
 * Tx descriptor from the TCQ, fills the packet-ready queue and the on-board
 * buffer descriptor, then builds a two-entry DLE chain (payload + AAL5 CPCS
 * trailer) for the DMA engine and kicks the transaction counter.
 *
 * Returns 0 when the skb was consumed (transmitted or dropped) and 1 when
 * no descriptor was available, in which case the caller keeps ownership of
 * the skb (ia_send() backlogs it).
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        IADEV *iadev;
        struct dle *wr_ptr;
        struct tx_buf_desc __iomem *buf_desc_ptr;
        int desc;
        int comp_code;
        int total_len;
        struct cpcs_trailer *trailer;
        struct ia_vcc *iavcc;

        iadev = INPH_IA_DEV(vcc->dev);
        iavcc = INPH_IA_VCC(vcc);
        /* Drop (but "succeed") if the VC's Tx side was already closed. */
        if (!iavcc->txing) {
           printk("discard packet on closed VC\n");
           if (vcc->pop)
		vcc->pop(vcc, skb);
           else
		dev_kfree_skb_any(skb);
	   return 0;
        }

        /* Drop packets that cannot fit a Tx buffer (8 bytes reserved). */
        if (skb->len > iadev->tx_buf_sz - 8) {
           printk("Transmit size over tx buffer size\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
          return 0;
        }
        /* The DMA engine needs 4-byte-aligned payload; drop otherwise. */
        if ((unsigned long)skb->data & 3) {
           printk("Misaligned SKB\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
           return 0;
        }
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
	    return 1;
	/* Top 3 bits carry a completion code; low 13 bits the descriptor. */
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
                /* NOTE(review): counts a dropped packet as transmitted --
                 * presumably to keep the caller's accounting moving. */
                atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
		    vcc->pop(vcc, skb);
		else
		    dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
	    IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
                                                            desc, comp_code);)
	}

        /* remember the desc and vcc mapping */
        iavcc->vc_desc_cnt++;
        iadev->desc_tbl[desc-1].iavcc = iavcc;
        iadev->desc_tbl[desc-1].txskb = skb;
        IA_SKB_STATE(skb) = 0;

        /* Consume the TCQ entry we just read, wrapping at the queue end. */
        iadev->ffL.tcq_rd += 2;
        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
	  	iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
		and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

 	iadev->ffL.prq_wr += 2;
        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
                iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
           make it  aligned on a 48 byte boundary.  */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
                  skb, skb->data, skb->len, desc);)
	trailer->control = 0;
        /*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
                                                        skb->len, tcnter++);
        xdump(skb->data, skb->len, "TX: ");
        printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

        if (vcc->qos.txtp.traffic_class == ATM_ABR)
	   clear_lockup (vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
                                                  buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
        /* NOTE(review): the test matches 0x2c-0x2f (bytes >> 2 == 0xb),
         * slightly wider than the comment above says — confirm intent. */
        if ((wr_ptr->bytes >> 2) == 0xb)
           wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

        /* Build trailer dle */
        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

        wr_ptr->bytes = sizeof(struct cpcs_trailer);
        wr_ptr->mode = DMA_INT_ENABLE;
        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

        /* end is not to be used for the DLE q */
        if (++wr_ptr == iadev->tx_dle_q.end)
                wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
        ATM_DESC(skb) = vcc->vci;
        skb_queue_tail(&iadev->tx_dma_q, skb);

        atomic_inc(&vcc->stats->tx);
        iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
        /* add flow control logic */
        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
          if (iavcc->vc_desc_cnt > 10) {
             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = -1;
              iavcc->saved_tx_quota = vcc->tx_quota;
           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = 0;
           }
        }
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
3064
3065static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3066{
3067        IADEV *iadev;
3068        unsigned long flags;
3069
3070        iadev = INPH_IA_DEV(vcc->dev);
3071        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3072        {
3073            if (!skb)
3074                printk(KERN_CRIT "null skb in ia_send\n");
3075            else dev_kfree_skb_any(skb);
3076            return -EINVAL;
3077        }
3078        spin_lock_irqsave(&iadev->tx_lock, flags);
3079        if (!test_bit(ATM_VF_READY,&vcc->flags)){
3080            dev_kfree_skb_any(skb);
3081            spin_unlock_irqrestore(&iadev->tx_lock, flags);
3082            return -EINVAL;
3083        }
3084        ATM_SKB(skb)->vcc = vcc;
3085
3086        if (skb_peek(&iadev->tx_backlog)) {
3087           skb_queue_tail(&iadev->tx_backlog, skb);
3088        }
3089        else {
3090           if (ia_pkt_tx (vcc, skb)) {
3091              skb_queue_tail(&iadev->tx_backlog, skb);
3092           }
3093        }
3094        spin_unlock_irqrestore(&iadev->tx_lock, flags);
3095        return 0;
3096
3097}
3098
3099static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3100{
3101  int   left = *pos, n;
3102  char  *tmpPtr;
3103  IADEV *iadev = INPH_IA_DEV(dev);
3104  if(!left--) {
3105     if (iadev->phy_type == FE_25MBIT_PHY) {
3106       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3107       return n;
3108     }
3109     if (iadev->phy_type == FE_DS3_PHY)
3110        n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3111     else if (iadev->phy_type == FE_E3_PHY)
3112        n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3113     else if (iadev->phy_type == FE_UTP_OPTION)
3114         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
3115     else
3116        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3117     tmpPtr = page + n;
3118     if (iadev->pci_map_size == 0x40000)
3119        n += sprintf(tmpPtr, "-1KVC-");
3120     else
3121        n += sprintf(tmpPtr, "-4KVC-");
3122     tmpPtr = page + n;
3123     if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3124        n += sprintf(tmpPtr, "1M  \n");
3125     else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3126        n += sprintf(tmpPtr, "512K\n");
3127     else
3128       n += sprintf(tmpPtr, "128K\n");
3129     return n;
3130  }
3131  if (!left) {
3132     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3133                           "  Size of Tx Buffer  :  %u\n"
3134                           "  Number of Rx Buffer:  %u\n"
3135                           "  Size of Rx Buffer  :  %u\n"
3136                           "  Packets Received   :  %u\n"
3137                           "  Packets Transmitted:  %u\n"
3138                           "  Cells Received     :  %u\n"
3139                           "  Cells Transmitted  :  %u\n"
3140                           "  Board Dropped Cells:  %u\n"
3141                           "  Board Dropped Pkts :  %u\n",
3142                           iadev->num_tx_desc,  iadev->tx_buf_sz,
3143                           iadev->num_rx_desc,  iadev->rx_buf_sz,
3144                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3145                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3146                           iadev->drop_rxcell, iadev->drop_rxpkt);
3147  }
3148  return 0;
3149}
3150
/* atm_dev operations implemented by this driver (AAL5 only; see ia_open). */
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
3162
/*
 * ia_init_one - PCI probe callback.
 *
 * Allocates the per-board IADEV, enables the PCI device, registers an ATM
 * device with the core, records the board in the module-wide ia_dev[] /
 * _ia_dev[] tables, then runs the hardware bring-up (ia_init/ia_start).
 * On any failure the steps taken so far are unwound via the goto-cleanup
 * labels.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): iadev_count indexes ia_dev[]/_ia_dev[] with no visible
 * bound check here — confirm the array capacity declared earlier in the
 * file cannot be exceeded by the number of probed boards.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* Publish the board before bring-up; roll back below on failure. */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* Link the new board onto the head of the driver's board list. */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3222
/*
 * ia_remove_one - PCI remove callback; undoes ia_init_one/ia_start.
 *
 * Quiesces the PHY first (interrupt disable, then ->stop), frees the IRQ,
 * takes the board out of the module-wide tables, deregisters the ATM
 * device, and finally releases the mapped registers, Rx/Tx rings, and the
 * IADEV itself.
 *
 * NOTE(review): the SUNI_RSOP_CIE write is issued unconditionally —
 * presumably harmless for non-SUNI PHY variants (25Mbit/DS3/E3); confirm
 * against ia_phy_put and the PHY init paths earlier in the file.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
      	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

      	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

      	kfree(iadev);
}
3252
3253static const struct pci_device_id ia_pci_tbl[] = {
3254	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3255	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3256	{ 0,}
3257};
3258MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3259
/* PCI driver glue: probe/remove hooks for the devices in ia_pci_tbl. */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3266
3267static int __init ia_module_init(void)
3268{
3269	int ret;
3270
3271	ret = pci_register_driver(&ia_driver);
3272	if (ret >= 0) {
3273		ia_timer.expires = jiffies + 3*HZ;
3274		add_timer(&ia_timer);
3275	} else
3276		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3277	return ret;
3278}
3279
/*
 * Module unload: unregister the PCI driver (tearing down every board via
 * ia_remove_one), then stop the periodic driver timer.
 * NOTE(review): the timer is stopped only after the boards are gone —
 * presumably its callback tolerates an empty board list; confirm against
 * the ia_timer handler defined earlier in this file.
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3286
/* Kernel module entry/exit points. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3289