#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var) (((var) & 0xffff0000) | ((var) & 0xffff))
#else
-#define CR_READ(var) (u_long)(var)
+#define CR_READ(var) (__le32)(var)
#endif
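
The point of this hunk, and of the whole patch, is to replace the driver-private AIX_REVERSE() byte swap with the kernel's typed little-endian helpers, so sparse can verify that descriptor fields are only touched through explicit conversions. For reference, a minimal userspace model of the conversion semantics; in the kernel, __le32 comes from <linux/types.h> and the helpers from <asm/byteorder.h>, so none of the names below are the driver's:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t le32_m;                /* stand-in for __le32 */

static le32_m model_cpu_to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);    /* swap on big-endian hosts */
#else
	return v;                       /* identity on little-endian hosts */
#endif
}

static uint32_t model_le32_to_cpu(le32_m v)
{
	return model_cpu_to_le32(v);    /* the swap is its own inverse */
}

int main(void)
{
	uint32_t own = 0x80000000u;     /* an OWN-style control bit */
	le32_m wire = model_cpu_to_le32(own);

	printf("cpu 0x%08x -> le 0x%08x -> cpu 0x%08x\n",
	       (unsigned)own, (unsigned)wire,
	       (unsigned)model_le32_to_cpu(wire));
	return 0;
}
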
#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
for (i=count-1, d1=start; i ; i--) {
d2 = d1 ;
d1++ ; /* descr is owned by the host */
- d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+ d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
d2->r.rxd_next = &d1->r ;
phys = mac_drv_virt2phys(smc,(void *)d1) ;
- d2->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+ d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
}
DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
- d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+ d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
d1->r.rxd_next = &start->r ;
phys = mac_drv_virt2phys(smc,(void *)start) ;
- d1->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+ d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
for (i=count, d1=start; i ; i--) {
DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
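
init_descr_ring() links count descriptors into a circle, keeping two parallel links per descriptor: a virtual pointer (rxd_next) for the host and the little-endian physical address of the successor (rxd_nrdadr) for the BMU. A sketch of the same chaining pattern, using a simplified, hypothetical descriptor rather than the driver's union s_fp_descr:

#include <stdint.h>

struct desc {
	uint32_t ctrl;                  /* BMU control word, stored little-endian */
	uint32_t next_phys;             /* physical address of successor, little-endian */
	struct desc *next;              /* host-side (virtual) link */
};

/* virt2phys stands in for mac_drv_virt2phys(); the cpu_to_le32() that
 * would wrap each next_phys store in the driver is elided for brevity */
static void link_ring(struct desc *start, int count,
		      uint32_t (*virt2phys)(void *))
{
	struct desc *d = start;
	int i;

	for (i = 0; i < count - 1; i++, d++) {
		d->next = d + 1;                 /* host follows pointers */
		d->next_phys = virt2phys(d + 1); /* BMU follows phys addrs */
	}
	d->next = start;                         /* close the ring */
	d->next_phys = virt2phys(start);
}
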
DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_ASYNC_TXD_COUNT) ;
- phys = AIX_REVERSE(ds->txd_ntdadr) ;
+ phys = le32_to_cpu(ds->txd_ntdadr) ;
ds++ ;
queue->tx_curr_put = queue->tx_curr_get = ds ;
ds-- ;
DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
HWM_SYNC_TXD_COUNT) ;
- phys = AIX_REVERSE(ds->txd_ntdadr) ;
+ phys = le32_to_cpu(ds->txd_ntdadr) ;
ds++ ;
queue->tx_curr_put = queue->tx_curr_get = ds ;
queue->tx_free = HWM_SYNC_TXD_COUNT ;
DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
SMT_R1_RXD_COUNT) ;
- phys = AIX_REVERSE(ds->rxd_nrdadr) ;
+ phys = le32_to_cpu(ds->rxd_nrdadr) ;
ds++ ;
queue->rx_curr_put = queue->rx_curr_get = ds ;
queue->rx_free = SMT_R1_RXD_COUNT ;
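
After each ring is built, the code reads the little-endian next-descriptor address back out of the first descriptor with le32_to_cpu(), giving the numeric physical address the CPU later programs into the adapter's queue registers (see the outpd() writes at the end of this patch). A sketch of that read-back, with swap_if_be() modelling le32_to_cpu():

#include <stdint.h>

static inline uint32_t swap_if_be(uint32_t v)   /* models le32_to_cpu() */
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

/* Recover the host-order physical address of the next descriptor from
 * its little-endian in-memory representation. */
static uint32_t next_desc_phys(const uint32_t *le_next_field)
{
	return swap_if_be(*le_next_field);
}
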
for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
t = t->txd_next ;
}
- phys = AIX_REVERSE(t->txd_ntdadr) ;
+ phys = le32_to_cpu(t->txd_ntdadr) ;
t = queue->tx_curr_get ;
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
- tbctrl = AIX_REVERSE(t->txd_tbctrl) ;
+ tbctrl = le32_to_cpu(t->txd_tbctrl) ;
if (tbctrl & BMU_OWN) {
if (tbctrl & BMU_STF) {
/*
* repair the descriptor
*/
- t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
}
}
- phys = AIX_REVERSE(t->txd_ntdadr) ;
+ phys = le32_to_cpu(t->txd_ntdadr) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
tx_used-- ;
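
Note how the repair loops clear BMU_OWN with `&= ~cpu_to_le32(BMU_OWN)` rather than swapping the whole control word: because BMU_OWN is a constant, the swapped mask folds at compile time, and the descriptor is modified with a single AND without ever leaving its little-endian representation. A minimal model of that idiom, with the bit position assumed for illustration:

#include <stdint.h>

#define OWN_BIT 0x80000000u             /* assumed position, illustrative */

static inline uint32_t swap_if_be(uint32_t v)   /* models cpu_to_le32() */
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

/* Hand a descriptor back to the host: the swapped mask is a compile-time
 * constant, so no per-descriptor byte swap happens at runtime. */
static inline void clear_own(uint32_t *le_ctrl)
{
	*le_ctrl &= ~swap_if_be(OWN_BIT);
}
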
for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
r = r->rxd_next ;
}
- phys = AIX_REVERSE(r->rxd_nrdadr) ;
+ phys = le32_to_cpu(r->rxd_nrdadr) ;
r = queue->rx_curr_get ;
while (rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- rbctrl = AIX_REVERSE(r->rxd_rbctrl) ;
+ rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
if (rbctrl & BMU_OWN) {
if (rbctrl & BMU_STF) {
/*
* repair the descriptor
*/
- r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
}
}
- phys = AIX_REVERSE(r->rxd_nrdadr) ;
+ phys = le32_to_cpu(r->rxd_nrdadr) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
rx_used-- ;
do {
DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- rbctrl = CR_READ(r->rxd_rbctrl) ;
- rbctrl = AIX_REVERSE(rbctrl) ;
+ rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
if (rbctrl & BMU_OWN) {
NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
smc->os.hwm.detec_count = 0 ;
goto rx_end ;
}
- rfsw = AIX_REVERSE(r->rxd_rfsw) ;
+ rfsw = le32_to_cpu(r->rxd_rfsw) ;
if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
/*
* The BMU_STF bit is deleted, 1 frame is
/* maybe the next 2 DRV_BUF_FLUSH() calls can be skipped, because */
/* BMU_ST_BUF will not be changed by the ASIC */
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
- while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+ while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
DB_RX("Check STF bit in %x",(void *)r,0,5) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
/*
* ASIC Errata no. 7 (STF - Bit Bug)
*/
- rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ;
+ rxd->rxd_rbctrl &= ~cpu_to_le32(BMU_STF) ;
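
The `<<5` in the earlier `(rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) << 5)` test implies the adapter keeps two copies of the start-of-frame flag five bits apart; the shifts in hwm_rx_frag() below (FIRST_FRAG shifted by 26 and by 21) suggest bits 26 and 21. The consistency test catches RxDs where the two copies disagree, and the line above clears STF before the RxD is recycled, presumably so a stale start-of-frame flag cannot confuse the next pass. A self-checking model, with the bit positions inferred from this patch rather than taken from the driver's headers:

#include <stdint.h>
#include <assert.h>

#define STF_MODEL    (1u << 26)         /* inferred from FIRST_FRAG << 26 */
#define ST_BUF_MODEL (1u << 21)         /* inferred from FIRST_FRAG << 21 */

/* Both copies of the start-of-frame flag must agree on a healthy RxD. */
static int stf_consistent(uint32_t ctrl)
{
	return (ctrl & STF_MODEL) == ((ctrl & ST_BUF_MODEL) << 5);
}

int main(void)
{
	assert(stf_consistent(STF_MODEL | ST_BUF_MODEL));  /* healthy first RxD */
	assert(stf_consistent(0));                         /* mid-frame RxD */
	assert(!stf_consistent(ST_BUF_MODEL));             /* errata no. 7 hit */
	return 0;
}
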
for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
hwm_cpy_rxd2mb(rxd,data,len) ;
#else
for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
- n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ;
+ n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
memcpy(data,r->rxd_virt,n) ;
data += n ;
int frame_status)
{
struct s_smt_fp_rxd volatile *r ;
- u_int rbctrl ;
+ __le32 rbctrl ;
NDD_TRACE("RHfB",virt,len,frame_status) ;
DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
r->rxd_virt = virt ;
- r->rxd_rbadr = AIX_REVERSE(phys) ;
- rbctrl = AIX_REVERSE( (((u_long)frame_status &
+ r->rxd_rbadr = cpu_to_le32(phys) ;
+ rbctrl = cpu_to_le32( (((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG))<<26) |
(((u_long) frame_status & FIRST_FRAG) << 21) |
BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
- NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ;
+ NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
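
hwm_rx_frag() packs everything the BMU needs into one 32-bit control word before converting it: the frame-status flags shifted into the STF/EOF and ST_BUF positions, the OWN/CHECK/IRQ control bits, and the buffer length in the low bits. A model of that packing; apart from the two shifts, every bit value below is an assumption made for illustration, not the driver's definitions:

#include <stdint.h>

#define FIRST_FRAG_M 1u                 /* inferred: shifts into bits 26 and 21 */
#define LAST_FRAG_M  2u                 /* inferred: shifts into bit 27 */
#define OWN_M        (1u << 31)         /* assumed positions below */
#define CHECK_M      (1u << 30)
#define EN_IRQ_EOF_M (1u << 29)
#define LEN_MASK_M   0x3fffu            /* assumed RD_LENGTH-style mask */

/* Build a receive control word the way hwm_rx_frag() does, in CPU order;
 * the driver then stores cpu_to_le32() of this value into rxd_rbctrl. */
static uint32_t build_rbctrl(uint32_t frame_status, uint32_t len)
{
	return ((frame_status & (FIRST_FRAG_M | LAST_FRAG_M)) << 26) |
	       ((frame_status & FIRST_FRAG_M) << 21) |
	       OWN_M | CHECK_M | EN_IRQ_EOF_M | (len & LEN_MASK_M);
}
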
/*
while (queue->rx_used) {
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
- r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
frag_count = 1 ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (r != queue->rx_curr_put &&
- !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+ !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
DB_RX("Check STF bit in %x",(void *)r,0,5) ;
- r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
{
struct s_smt_fp_txd volatile *t ;
struct s_smt_tx_queue *queue ;
- u_int tbctrl ;
+ __le32 tbctrl ;
queue = smc->os.hwm.tx_p ;
/* '*t' is already defined */
DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
t->txd_virt = virt ;
- t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ;
- t->txd_tbadr = AIX_REVERSE(phys) ;
- tbctrl = AIX_REVERSE((((u_long)frame_status &
+ t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
+ t->txd_tbadr = cpu_to_le32(phys) ;
+ tbctrl = cpu_to_le32((((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
BMU_OWN|BMU_CHECK |len) ;
t->txd_tbctrl = tbctrl ;
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t ;
u_long phys ;
- u_int tbctrl ;
+ __le32 tbctrl ;
NDD_TRACE("THSB",mb,fc,0) ;
DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
if (i == frag_count-1) {
frame_status |= LAST_FRAG ;
- t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR |
- (((u_long)(mb->sm_len-1)&3) << 27)) ;
+ t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
+ (((__u32)(mb->sm_len-1)&3) << 27)) ;
}
t->txd_virt = virt[i] ;
phys = dma_master(smc, (void far *)virt[i],
frag_len[i], DMA_RD|SMT_BUF) ;
- t->txd_tbadr = AIX_REVERSE(phys) ;
- tbctrl = AIX_REVERSE((((u_long) frame_status &
+ t->txd_tbadr = cpu_to_le32(phys) ;
+ tbctrl = cpu_to_le32((((__u32)frame_status &
(FIRST_FRAG|LAST_FRAG)) << 26) |
BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
t->txd_tbctrl = tbctrl ;
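
Note the type discipline the annotations introduce on the transmit path: tbctrl is now declared __le32, built once with cpu_to_le32(), and stored into the descriptor untouched; only when the CPU wants to test bits, as in the OWN check of the reclaim loop below, is le32_to_cpu() applied. A userspace sketch of that boundary, with illustrative names:

#include <stdint.h>

typedef uint32_t le32_m;                /* stand-in for __le32 */

struct txd_m {
	le32_m tbctrl;                  /* stored as the BMU reads it */
};

static inline le32_m to_le(uint32_t v)  /* models cpu_to_le32() */
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

#define OWN_M (1u << 31)                /* assumed bit position */

static void post_txd(struct txd_m *t, uint32_t cpu_ctrl)
{
	t->tbctrl = to_le(cpu_ctrl);    /* convert once, at the boundary */
}

static int owned_by_bmu(const struct txd_m *t)
{
	/* same swap models le32_to_cpu() here; then test in CPU order */
	return (to_le(t->tbctrl) & OWN_M) != 0;
}
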
do {
DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
- tbctrl = CR_READ(t1->txd_tbctrl) ;
- tbctrl = AIX_REVERSE(tbctrl) ;
+ tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
if (tbctrl & BMU_OWN || !queue->tx_used){
DB_TX("End of TxDs queue %d",i,0,4) ;
t1 = queue->tx_curr_get ;
for (n = frag_count; n; n--) {
- tbctrl = AIX_REVERSE(t1->txd_tbctrl) ;
+ tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
dma_complete(smc,
(union s_fp_descr volatile *) t1,
(int) (DMA_RD |
while (tx_used) {
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
- t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+ t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
t = t->txd_next ;
tx_used-- ;
* tx_curr_get and tx_curr_put to this position
*/
if (i == QUEUE_S) {
- outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+ outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
}
else {
- outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+ outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
}
queue->tx_curr_put = queue->tx_curr_get->txd_next ;