author      Al Viro <viro@ftp.linux.org.uk>            2008-03-17 01:22:24 +0300
committer   Jeff Garzik <jeff@garzik.org>              2008-03-26 07:18:44 +0300
commit      2f220e305b23ab277aa0f91e2a65978f5cc1a785 (patch)
tree        005cab70e1afb7d3f6f8acf57ebec13125325235 /drivers/net/skfp/hwmtm.c
parent      eca1ad82bda0293339e1f8439dc9c8dba25ff088 (diff)
download    linux-2f220e305b23ab277aa0f91e2a65978f5cc1a785.tar.xz
skfp annotations
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
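
For context: the change is mechanical. The driver-private AIX_REVERSE() byte swap is replaced by the standard cpu_to_le32()/le32_to_cpu() helpers, the affected descriptor control words become __le32, and CR_READ() is retyped to match, so sparse can check the endianness of every descriptor access at build time. Below is a minimal sketch of the idiom the patch adopts; the demo_rxd descriptor, rxd_ctrl field and DEMO_OWN flag are made up for illustration and are not the real skfp layout.

/*
 * Illustrative only -- not from the patch.  A descriptor whose control
 * word lives in memory in little-endian order, accessed the way the
 * annotated driver now does it.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_OWN	0x80000000u	/* "owned by hardware" flag, CPU order */

struct demo_rxd {
	__le32 rxd_ctrl;		/* sparse now knows this is little-endian */
};

static void demo_give_to_hw(struct demo_rxd *d, u32 len)
{
	/* build the word in CPU order, convert once when storing */
	d->rxd_ctrl = cpu_to_le32(DEMO_OWN | len);
}

static bool demo_hw_done(const struct demo_rxd *d)
{
	/* test a flag by converting the constant, not the loaded value */
	return !(d->rxd_ctrl & cpu_to_le32(DEMO_OWN));
}

static u32 demo_take_back(struct demo_rxd *d)
{
	/* clear a flag: the complement commutes with the byte swap */
	d->rxd_ctrl &= ~cpu_to_le32(DEMO_OWN);
	return le32_to_cpu(d->rxd_ctrl);	/* full CPU-order value */
}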
Diffstat (limited to 'drivers/net/skfp/hwmtm.c')
-rw-r--r--  drivers/net/skfp/hwmtm.c  86
1 file changed, 42 insertions, 44 deletions
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index 46e339315656..4218e97033c9 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -208,7 +208,7 @@ SMbuf* smt_get_mbuf(struct s_smc *smc);
 #if defined(NDIS_OS2) || defined(ODI2)
 #define CR_READ(var)	((var) & 0xffff0000 | ((var) & 0xffff))
 #else
-#define CR_READ(var)	(u_long)(var)
+#define CR_READ(var)	(__le32)(var)
 #endif
 
 #define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
@@ -343,16 +343,16 @@ static u_long init_descr_ring(struct s_smc *smc,
 	for (i=count-1, d1=start; i ; i--) {
 		d2 = d1 ;
 		d1++ ;		/* descr is owned by the host */
-		d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
 		d2->r.rxd_next = &d1->r ;
 		phys = mac_drv_virt2phys(smc,(void *)d1) ;
-		d2->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
 	}
 	DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
-	d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ;
+	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
 	d1->r.rxd_next = &start->r ;
 	phys = mac_drv_virt2phys(smc,(void *)start) ;
-	d1->r.rxd_nrdadr = AIX_REVERSE(phys) ;
+	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
 
 	for (i=count, d1=start; i ; i--) {
 		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
@@ -376,7 +376,7 @@ static void init_txd_ring(struct s_smc *smc)
 	DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_ASYNC_TXD_COUNT) ;
-	phys = AIX_REVERSE(ds->txd_ntdadr) ;
+	phys = le32_to_cpu(ds->txd_ntdadr) ;
 	ds++ ;
 	queue->tx_curr_put = queue->tx_curr_get = ds ;
 	ds-- ;
@@ -390,7 +390,7 @@ static void init_txd_ring(struct s_smc *smc)
 	DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		HWM_SYNC_TXD_COUNT) ;
-	phys = AIX_REVERSE(ds->txd_ntdadr) ;
+	phys = le32_to_cpu(ds->txd_ntdadr) ;
 	ds++ ;
 	queue->tx_curr_put = queue->tx_curr_get = ds ;
 	queue->tx_free = HWM_SYNC_TXD_COUNT ;
@@ -412,7 +412,7 @@ static void init_rxd_ring(struct s_smc *smc)
 	DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
 	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
 		SMT_R1_RXD_COUNT) ;
-	phys = AIX_REVERSE(ds->rxd_nrdadr) ;
+	phys = le32_to_cpu(ds->rxd_nrdadr) ;
 	ds++ ;
 	queue->rx_curr_put = queue->rx_curr_get = ds ;
 	queue->rx_free = SMT_R1_RXD_COUNT ;
@@ -607,12 +607,12 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
 	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
 		t = t->txd_next ;
 	}
-	phys = AIX_REVERSE(t->txd_ntdadr) ;
+	phys = le32_to_cpu(t->txd_ntdadr) ;
 
 	t = queue->tx_curr_get ;
 	while (tx_used) {
 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
-		tbctrl = AIX_REVERSE(t->txd_tbctrl) ;
+		tbctrl = le32_to_cpu(t->txd_tbctrl) ;
 
 		if (tbctrl & BMU_OWN) {
 			if (tbctrl & BMU_STF) {
@@ -622,10 +622,10 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
 				/*
 				 * repair the descriptor
 				 */
-				t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			}
 		}
-		phys = AIX_REVERSE(t->txd_ntdadr) ;
+		phys = le32_to_cpu(t->txd_ntdadr) ;
 		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
 		t = t->txd_next ;
 		tx_used-- ;
@@ -659,12 +659,12 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
 	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
 		r = r->rxd_next ;
 	}
-	phys = AIX_REVERSE(r->rxd_nrdadr) ;
+	phys = le32_to_cpu(r->rxd_nrdadr) ;
 
 	r = queue->rx_curr_get ;
 	while (rx_used) {
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-		rbctrl = AIX_REVERSE(r->rxd_rbctrl) ;
+		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
 
 		if (rbctrl & BMU_OWN) {
 			if (rbctrl & BMU_STF) {
@@ -674,10 +674,10 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
 				/*
 				 * repair the descriptor
 				 */
-				r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			}
 		}
-		phys = AIX_REVERSE(r->rxd_nrdadr) ;
+		phys = le32_to_cpu(r->rxd_nrdadr) ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 		r = r->rxd_next ;
 		rx_used-- ;
@@ -1094,8 +1094,7 @@ void process_receive(struct s_smc *smc)
 	do {
 		DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-		rbctrl = CR_READ(r->rxd_rbctrl) ;
-		rbctrl = AIX_REVERSE(rbctrl) ;
+		rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
 
 		if (rbctrl & BMU_OWN) {
 			NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
@@ -1118,7 +1117,7 @@ void process_receive(struct s_smc *smc)
 			smc->os.hwm.detec_count = 0 ;
 			goto rx_end ;
 		}
-		rfsw = AIX_REVERSE(r->rxd_rfsw) ;
+		rfsw = le32_to_cpu(r->rxd_rfsw) ;
 		if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
 			/*
 			 * The BMU_STF bit is deleted, 1 frame is
@@ -1151,7 +1150,7 @@ void process_receive(struct s_smc *smc)
 			/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
 			/* BMU_ST_BUF will not be changed by the ASIC */
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-			while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+			while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
				DB_RX("Check STF bit in %x",(void *)r,0,5) ;
				r = r->rxd_next ;
				DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1171,7 +1170,7 @@ void process_receive(struct s_smc *smc)
 		/*
 		 * ASIC Errata no. 7 (STF - Bit Bug)
 		 */
-		rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ;
+		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
 
 		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
 			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
@@ -1287,7 +1286,7 @@ void process_receive(struct s_smc *smc)
 		hwm_cpy_rxd2mb(rxd,data,len) ;
 #else
 		for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
-			n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ;
+			n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
 			DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
 			memcpy(data,r->rxd_virt,n) ;
 			data += n ;
@@ -1426,14 +1425,14 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 		int frame_status)
 {
 	struct s_smt_fp_rxd volatile *r ;
-	u_int rbctrl ;
+	__le32 rbctrl;
 
 	NDD_TRACE("RHfB",virt,len,frame_status) ;
 	DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
 	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
 	r->rxd_virt = virt ;
-	r->rxd_rbadr = AIX_REVERSE(phys) ;
-	rbctrl = AIX_REVERSE( (((u_long)frame_status &
+	r->rxd_rbadr = cpu_to_le32(phys) ;
+	rbctrl = cpu_to_le32( (((__u32)frame_status &
 		(FIRST_FRAG|LAST_FRAG))<<26) |
 		(((u_long) frame_status & FIRST_FRAG) << 21) |
 		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
@@ -1444,7 +1443,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
 	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
 	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
-	NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ;
+	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
 }
 
 /*
@@ -1494,15 +1493,15 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
 	while (queue->rx_used) {
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
 		DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
-		r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 		frag_count = 1 ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 		r = r->rxd_next ;
 		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
 		while (r != queue->rx_curr_put &&
-			!(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) {
+			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
 			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
-			r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ;
+			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
 			r = r->rxd_next ;
 			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1640,7 +1639,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 {
 	struct s_smt_fp_txd volatile *t ;
 	struct s_smt_tx_queue *queue ;
-	u_int tbctrl ;
+	__le32 tbctrl ;
 
 	queue = smc->os.hwm.tx_p ;
 
@@ -1657,9 +1656,9 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
 	/* '*t' is already defined */
 	DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
 	t->txd_virt = virt ;
-	t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ;
-	t->txd_tbadr = AIX_REVERSE(phys) ;
-	tbctrl = AIX_REVERSE((((u_long)frame_status &
+	t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
+	t->txd_tbadr = cpu_to_le32(phys) ;
+	tbctrl = cpu_to_le32((((__u32)frame_status &
 		(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
 		BMU_OWN|BMU_CHECK |len) ;
 	t->txd_tbctrl = tbctrl ;
@@ -1826,7 +1825,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
 	struct s_smt_tx_queue *queue ;
 	struct s_smt_fp_txd volatile *t ;
 	u_long phys ;
-	u_int tbctrl ;
+	__le32 tbctrl;
 
 	NDD_TRACE("THSB",mb,fc,0) ;
 	DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
@@ -1894,14 +1893,14 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
 		DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
 		if (i == frag_count-1) {
 			frame_status |= LAST_FRAG ;
-			t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR |
-				(((u_long)(mb->sm_len-1)&3) << 27)) ;
+			t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
+				(((__u32)(mb->sm_len-1)&3) << 27)) ;
 		}
 		t->txd_virt = virt[i] ;
 		phys = dma_master(smc, (void far *)virt[i], frag_len[i],
 			DMA_RD|SMT_BUF) ;
-		t->txd_tbadr = AIX_REVERSE(phys) ;
-		tbctrl = AIX_REVERSE((((u_long) frame_status &
+		t->txd_tbadr = cpu_to_le32(phys) ;
+		tbctrl = cpu_to_le32((((__u32)frame_status &
 			(FIRST_FRAG|LAST_FRAG)) << 26) |
 			BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
 		t->txd_tbctrl = tbctrl ;
@@ -1971,8 +1970,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
 		do {
 			DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
 			DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
-			tbctrl = CR_READ(t1->txd_tbctrl) ;
-			tbctrl = AIX_REVERSE(tbctrl) ;
+			tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
 
 			if (tbctrl & BMU_OWN || !queue->tx_used){
 				DB_TX("End of TxDs queue %d",i,0,4) ;
@@ -1984,7 +1982,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
 
 			t1 = queue->tx_curr_get ;
 			for (n = frag_count; n; n--) {
-				tbctrl = AIX_REVERSE(t1->txd_tbctrl) ;
+				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
 				dma_complete(smc,
 					(union s_fp_descr volatile *) t1,
 					(int) (DMA_RD |
@@ -2064,7 +2062,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
 		while (tx_used) {
 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
 			DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
-			t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ;
+			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
 			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
 			t = t->txd_next ;
 			tx_used-- ;
@@ -2086,10 +2084,10 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
 		 * tx_curr_get and tx_curr_put to this position
 		 */
 		if (i == QUEUE_S) {
-			outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
 		}
 		else {
-			outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ;
+			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
 		}
 		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
 
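
A side note on the conversions above: flags are cleared both as t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) and as rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF). For a full 32-bit word the two forms are interchangeable, because byte swapping commutes with bitwise complement, and either way the constant is converted at compile time, so big-endian hosts pay no per-access swap. The following is a small user-space check of that equivalence, using the libc htole32()/le32toh() counterparts of the kernel helpers; the 0x80000000 mask is only an example value, not taken from the driver headers.

/* Editorial sketch: verify ~htole32(x) == htole32(~x) for a 32-bit mask. */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_OWN 0x80000000u	/* example "owned by hardware" mask */

int main(void)
{
	/* descriptor control word as it would sit in memory (LE order) */
	uint32_t ctrl = htole32(DEMO_OWN | 0x5a5u);

	/* complement-then-swap and swap-then-complement give the same mask */
	assert((uint32_t)~htole32(DEMO_OWN) == htole32(~DEMO_OWN));

	ctrl &= ~htole32(DEMO_OWN);	/* clear the OWN bit in place */
	printf("ctrl (cpu order) = 0x%08" PRIx32 "\n", le32toh(ctrl));
	return 0;
}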