author     David S. Miller <davem@davemloft.net>	2015-03-16 02:56:52 +0300
committer  David S. Miller <davem@davemloft.net>	2015-03-16 02:56:52 +0300
commit     12028820423bd7705beb5e09889faab95c545ea1 (patch)
tree       4cb91e90fa4980a4e2853e5cb113402c96380f52
parent     5a2f78dd51d9d71aa40cb752af88332f45c884b7 (diff)
parent     559176415cc663fff9dd99a3862629a4fcdb36ab (diff)
download   linux-12028820423bd7705beb5e09889faab95c545ea1.tar.xz
Merge branch 'gianfar-next'
Claudiu Manoil says:
====================
gianfar: ARM port driver updates (2/2)
The 2nd round of driver updates to make gianfar portable on ARM,
for the ARM-based SoC that integrates eTSEC, the "ls1021a".
The patches address the bulk of the remaining endianness issues:
handling of the DMA descriptor fields (BD and FCB) and of the
device tree properties.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
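
For readers new to the driver, the pattern applied throughout the series is: declare the DMA descriptor (BD) and frame control block (FCB) fields as big-endian types (__be16/__be32) in gianfar.h, and convert at every CPU access with be16_to_cpu()/cpu_to_be32() and friends, so the same code runs on big-endian PowerPC and little-endian ARM hosts. Below is a minimal, self-contained user-space sketch of that read-modify-write pattern; it mirrors the gfar_clear_txbd_status() helper added by the series, but the demo_* byte-swap helpers, the struct layout and main() are illustration-only stand-ins (they assume a little-endian host), not driver code. The TXBD_WRAP/TXBD_READY values and the BD_LFLAG() shift do match gianfar.h.

/* Stand-alone model of the BD endianness pattern used in the series.
 * The swap helpers below are simplified stand-ins for the kernel's
 * cpu_to_be32()/be32_to_cpu(); the kernel versions compile to no-ops
 * on big-endian CPUs and to byte swaps on little-endian ones.
 */
#include <stdint.h>
#include <stdio.h>

#define TXBD_WRAP	0x2000
#define TXBD_READY	0x8000
#define BD_LFLAG(flags)	((uint32_t)(flags) << 16)

static uint32_t swap32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

/* Assume a little-endian host for the demo, so "to big-endian" is a swap. */
static uint32_t demo_cpu_to_be32(uint32_t v) { return swap32(v); }
static uint32_t demo_be32_to_cpu(uint32_t v) { return swap32(v); }

/* Mirrors struct txbd8: the hardware reads these fields as big-endian. */
struct txbd8 {
	uint32_t lstatus;	/* status (hi 16) + length (lo 16), big-endian */
	uint32_t bufPtr;	/* DMA buffer address, big-endian */
};

/* Same idea as the gfar_clear_txbd_status() helper added by the patch:
 * read the BD in CPU order, keep only the WRAP flag, write it back in
 * the hardware's (big-endian) order.
 */
static void clear_txbd_status(struct txbd8 *bdp)
{
	uint32_t lstatus = demo_be32_to_cpu(bdp->lstatus);

	lstatus &= BD_LFLAG(TXBD_WRAP);
	bdp->lstatus = demo_cpu_to_be32(lstatus);
}

int main(void)
{
	struct txbd8 bd;

	bd.lstatus = demo_cpu_to_be32(BD_LFLAG(TXBD_READY | TXBD_WRAP) | 64);
	clear_txbd_status(&bd);
	printf("lstatus after cleanup: 0x%08x\n",
	       (unsigned int)demo_be32_to_cpu(bd.lstatus));
	return 0;
}

Keeping the conversions at the accessor level also means sparse's __be16/__be32 annotations can flag any direct load or store of a descriptor field that skips the byte swap.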
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	202
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.h	 32
2 files changed, 138 insertions(+), 96 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7bf3682cdf47..70fa6887f940 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 {
 	u32 lstatus;
 
-	bdp->bufPtr = buf;
+	bdp->bufPtr = cpu_to_be32(buf);
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
@@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 
 	gfar_wmb();
 
-	bdp->lstatus = lstatus;
+	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
 static int gfar_init_bds(struct net_device *ndev)
@@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev)
 
 		/* Set the last descriptor in the ring to indicate wrap */
 		txbdp--;
-		txbdp->status |= TXBD_WRAP;
+		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+					    TXBD_WRAP);
 	}
 
 	rfbptr = &regs->rfbptr0;
@@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev)
 			struct sk_buff *skb = rx_queue->rx_skbuff[j];
 
 			if (skb) {
-				bufaddr = rxbdp->bufPtr;
+				bufaddr = be32_to_cpu(rxbdp->bufPtr);
 			} else {
 				skb = gfar_new_skb(ndev, &bufaddr);
 				if (!skb) {
@@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np,
 	grp->priv = priv;
 	spin_lock_init(&grp->grplock);
 	if (priv->mode == MQ_MG_MODE) {
-		u32 *rxq_mask, *txq_mask;
-		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+		u32 rxq_mask, txq_mask;
+		int ret;
+
+		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+
+		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
+		if (!ret) {
+			grp->rx_bit_map = rxq_mask ?
+				rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		}
+
+		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
+		if (!ret) {
+			grp->tx_bit_map = txq_mask ?
+				txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		}
 
 		if (priv->poll_mode == GFAR_SQ_POLLING) {
 			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
 			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
 			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
-		} else { /* GFAR_MQ_POLLING */
-			grp->rx_bit_map = rxq_mask ?
-				*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
-			grp->tx_bit_map = txq_mask ?
-				*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
 		}
 	} else {
 		grp->rx_bit_map = 0xFF;
@@ -769,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	struct gfar_private *priv = NULL;
 	struct device_node *np = ofdev->dev.of_node;
 	struct device_node *child = NULL;
-	const u32 *stash;
-	const u32 *stash_len;
-	const u32 *stash_idx;
+	struct property *stash;
+	u32 stash_len = 0;
+	u32 stash_idx = 0;
 	unsigned int num_tx_qs, num_rx_qs;
-	u32 *tx_queues, *rx_queues;
 	unsigned short mode, poll_mode;
 
 	if (!np)
@@ -787,10 +796,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		poll_mode = GFAR_SQ_POLLING;
 	}
 
-	/* parse the num of HW tx and rx queues */
-	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-
 	if (mode == SQ_SG_MODE) {
 		num_tx_qs = 1;
 		num_rx_qs = 1;
@@ -809,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 			num_tx_qs = num_grps; /* one txq per int group */
 			num_rx_qs = num_grps; /* one rxq per int group */
 		} else { /* GFAR_MQ_POLLING */
-			num_tx_qs = tx_queues ? *tx_queues : 1;
-			num_rx_qs = rx_queues ? *rx_queues : 1;
+			u32 tx_queues, rx_queues;
+			int ret;
+
+			/* parse the num of HW tx and rx queues */
+			ret = of_property_read_u32(np, "fsl,num_tx_queues",
+						   &tx_queues);
+			num_tx_qs = ret ? 1 : tx_queues;
+
+			ret = of_property_read_u32(np, "fsl,num_rx_queues",
+						   &rx_queues);
+			num_rx_qs = ret ? 1 : rx_queues;
 		}
 	}
 
@@ -851,13 +865,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	if (err)
 		goto rx_alloc_failed;
 
+	err = of_property_read_string(np, "model", &model);
+	if (err) {
+		pr_err("Device model property missing, aborting\n");
+		goto rx_alloc_failed;
+	}
+
 	/* Init Rx queue filer rule set linked list */
 	INIT_LIST_HEAD(&priv->rx_list.list);
 	priv->rx_list.count = 0;
 	mutex_init(&priv->rx_queue_access);
 
-	model = of_get_property(np, "model", NULL);
-
 	for (i = 0; i < MAXGROUPS; i++)
 		priv->gfargrp[i].regs = NULL;
 
@@ -877,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 			goto err_grp_init;
 	}
 
-	stash = of_get_property(np, "bd-stash", NULL);
+	stash = of_find_property(np, "bd-stash", NULL);
 
 	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
 
-	stash_len = of_get_property(np, "rx-stash-len", NULL);
+	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
 
-	if (stash_len)
-		priv->rx_stash_size = *stash_len;
+	if (err == 0)
+		priv->rx_stash_size = stash_len;
 
-	stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
 
-	if (stash_idx)
-		priv->rx_stash_index = *stash_idx;
+	if (err == 0)
+		priv->rx_stash_index = stash_idx;
 
 	if (stash_len || stash_idx)
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
@@ -919,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
 				     FSL_GIANFAR_DEV_HAS_TIMER;
 
-	ctype = of_get_property(np, "phy-connection-type", NULL);
+	err = of_property_read_string(np, "phy-connection-type", &ctype);
 
 	/* We only care about rgmii-id.  The rest are autodetected */
-	if (ctype && !strcmp(ctype, "rgmii-id"))
+	if (err == 0 && !strcmp(ctype, "rgmii-id"))
 		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
 	else
 		priv->interface = PHY_INTERFACE_MODE_MII;
 
-	if (of_get_property(np, "fsl,magic-packet", NULL))
+	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -1884,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 		if (!tx_queue->tx_skbuff[i])
 			continue;
 
-		dma_unmap_single(priv->dev, txbdp->bufPtr,
-				 txbdp->length, DMA_TO_DEVICE);
+		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
 		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
 		     j++) {
 			txbdp++;
-			dma_unmap_page(priv->dev, txbdp->bufPtr,
-				       txbdp->length, DMA_TO_DEVICE);
+			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+				       be16_to_cpu(txbdp->length),
+				       DMA_TO_DEVICE);
 		}
 		txbdp++;
 		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1911,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
 		if (rx_queue->rx_skbuff[i]) {
-			dma_unmap_single(priv->dev, rxbdp->bufPtr,
+			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
 					 priv->rx_buffer_size,
 					 DMA_FROM_DEVICE);
 			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
@@ -2167,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 	 */
 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
 		flags |= TXFCB_UDP;
-		fcb->phcs = udp_hdr(skb)->check;
+		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
 	} else
-		fcb->phcs = tcp_hdr(skb)->check;
+		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
 
 	/* l3os is the distance between the start of the
 	 * frame (skb->data) and the start of the IP hdr.
 	 * l4os is the distance between the start of the
 	 * l3 hdr and the l4 hdr
 	 */
-	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
+	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
 	fcb->l4os = skb_network_header_len(skb);
 
 	fcb->flags = flags;
@@ -2185,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
 void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 {
 	fcb->flags |= TXFCB_VLN;
-	fcb->vlctl = skb_vlan_tag_get(skb);
+	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
 }
 
 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2298,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
-	lstatus = txbdp->lstatus;
+	lstatus = be32_to_cpu(txbdp->lstatus);
 
 	/* Time stamp insertion requires one additional TxBD */
 	if (unlikely(do_tstamp))
@@ -2306,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					 tx_queue->tx_ring_size);
 
 	if (nr_frags == 0) {
-		if (unlikely(do_tstamp))
-			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
-							  TXBD_INTERRUPT);
-		else
+		if (unlikely(do_tstamp)) {
+			u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+			txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+		} else {
 			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+		}
 	} else {
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
@@ -2320,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			frag_len = skb_shinfo(skb)->frags[i].size;
 
-			lstatus = txbdp->lstatus | frag_len |
+			lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
 				  BD_LFLAG(TXBD_READY);
 
 			/* Handle the last BD specially */
@@ -2336,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				goto dma_map_err;
 
 			/* set the TxBD length and buffer pointer */
-			txbdp->bufPtr = bufaddr;
-			txbdp->lstatus = lstatus;
+			txbdp->bufPtr = cpu_to_be32(bufaddr);
+			txbdp->lstatus = cpu_to_be32(lstatus);
 		}
 
-		lstatus = txbdp_start->lstatus;
+		lstatus = be32_to_cpu(txbdp_start->lstatus);
 	}
 
 	/* Add TxPAL between FCB and frame if required */
@@ -2388,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
 		goto dma_map_err;
 
-	txbdp_start->bufPtr = bufaddr;
+	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
 
 	/* If time stamping is requested one additional TxBD must be set up. The
 	 * first TxBD points to the FCB and must have a data length of
@@ -2396,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * the full frame length.
 	 */
 	if (unlikely(do_tstamp)) {
-		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
-		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-					 (skb_headlen(skb) - fcb_len);
+		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+		bufaddr += fcb_len;
+		lstatus_ts |= BD_LFLAG(TXBD_READY) |
+			      (skb_headlen(skb) - fcb_len);
+
+		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
 	} else {
 		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2421,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	gfar_wmb();
 
-	txbdp_start->lstatus = lstatus;
+	txbdp_start->lstatus = cpu_to_be32(lstatus);
 
 	gfar_wmb(); /* force lstatus write before tx_skbuff */
 
@@ -2460,13 +2488,14 @@ dma_map_err:
 	if (do_tstamp)
 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 	for (i = 0; i < nr_frags; i++) {
-		lstatus = txbdp->lstatus;
+		lstatus = be32_to_cpu(txbdp->lstatus);
 		if (!(lstatus & BD_LFLAG(TXBD_READY)))
 			break;
-		txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
-		bufaddr = txbdp->bufPtr;
-		dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+		lstatus &= ~BD_LFLAG(TXBD_READY);
+		txbdp->lstatus = cpu_to_be32(lstatus);
+		bufaddr = be32_to_cpu(txbdp->bufPtr);
+		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
 			       DMA_TO_DEVICE);
 		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 	}
 
@@ -2607,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
 
-		lstatus = lbdp->lstatus;
+		lstatus = be32_to_cpu(lbdp->lstatus);
 
 		/* Only clean completed frames */
 		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
@@ -2616,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			next = next_txbd(bdp, base, tx_ring_size);
-			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+			buflen = be16_to_cpu(next->length) +
+				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 		} else
-			buflen = bdp->length;
+			buflen = be16_to_cpu(bdp->length);
 
-		dma_unmap_single(priv->dev, bdp->bufPtr,
+		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 buflen, DMA_TO_DEVICE);
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2631,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
 			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
 			skb_tstamp_tx(skb, &shhwtstamps);
-			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			gfar_clear_txbd_status(bdp);
 			bdp = next;
 		}
 
-		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+		gfar_clear_txbd_status(bdp);
 		bdp = next_txbd(bdp, base, tx_ring_size);
 
 		for (i = 0; i < frags; i++) {
-			dma_unmap_page(priv->dev, bdp->bufPtr,
-				       bdp->length, DMA_TO_DEVICE);
-			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+				       be16_to_cpu(bdp->length),
+				       DMA_TO_DEVICE);
+			gfar_clear_txbd_status(bdp);
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
 
@@ -2798,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 	 * were verified, then we tell the kernel that no
 	 * checksumming is necessary.  Otherwise, it is [FIXME]
 	 */
-	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
+	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+	    (RXFCB_CIP | RXFCB_CTU))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb_checksum_none_assert(skb);
 }
 
-
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			       int amount_pull, struct napi_struct *napi)
@@ -2846,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 	 * RXFCB_VLN is pseudo randomly set.
 	 */
 	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-	    fcb->flags & RXFCB_VLN)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
+	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(fcb->vlctl));
 
 	/* Send the packet up the stack */
 	napi_gro_receive(napi, skb);
@@ -2874,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
 
-	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
 		struct sk_buff *newskb;
 		dma_addr_t bufaddr;
 
@@ -2885,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
-		dma_unmap_single(priv->dev, bdp->bufPtr,
+		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
 
-		if (unlikely(!(bdp->status & RXBD_ERR) &&
-			     bdp->length > priv->rx_buffer_size))
-			bdp->status = RXBD_LARGE;
+		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
+			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
+			bdp->status = cpu_to_be16(RXBD_LARGE);
 
 		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
-			     bdp->status & RXBD_ERR)) {
-			count_errors(bdp->status, dev);
+		if (unlikely(!newskb ||
+			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
+			count_errors(be16_to_cpu(bdp->status), dev);
 
 			if (unlikely(!newskb)) {
 				newskb = skb;
-				bufaddr = bdp->bufPtr;
+				bufaddr = be32_to_cpu(bdp->bufPtr);
 			} else if (skb)
 				dev_kfree_skb(skb);
 		} else {
@@ -2908,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			howmany++;
 
 			if (likely(skb)) {
-				pkt_len = bdp->length - ETH_FCS_LEN;
+				pkt_len = be16_to_cpu(bdp->length) -
+					  ETH_FCS_LEN;
 				/* Remove the FCS from the packet length */
 				skb_put(skb, pkt_len);
 				rx_queue->stats.rx_bytes += pkt_len;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9e1802400c23..daa1d37de642 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -544,12 +544,12 @@ struct txbd8
 {
 	union {
 		struct {
-			u16	status;	/* Status Fields */
-			u16	length;	/* Buffer length */
+			__be16	status;	/* Status Fields */
+			__be16	length;	/* Buffer length */
 		};
-		u32 lstatus;
+		__be32 lstatus;
 	};
-	u32	bufPtr;	/* Buffer Pointer */
+	__be32	bufPtr;	/* Buffer Pointer */
 };
 
 struct txfcb {
@@ -557,28 +557,28 @@ struct txfcb {
 	u8	ptp;    /* Flag to enable tx timestamping */
 	u8	l4os;	/* Level 4 Header Offset */
 	u8	l3os;	/* Level 3 Header Offset */
-	u16	phcs;	/* Pseudo-header Checksum */
-	u16	vlctl;	/* VLAN control word */
+	__be16	phcs;	/* Pseudo-header Checksum */
+	__be16	vlctl;	/* VLAN control word */
 };
 
 struct rxbd8
 {
 	union {
 		struct {
-			u16	status;	/* Status Fields */
-			u16	length;	/* Buffer Length */
+			__be16	status;	/* Status Fields */
+			__be16	length;	/* Buffer Length */
 		};
-		u32 lstatus;
+		__be32 lstatus;
 	};
-	u32	bufPtr;	/* Buffer Pointer */
+	__be32	bufPtr;	/* Buffer Pointer */
 };
 
 struct rxfcb {
-	u16	flags;
+	__be16	flags;
 	u8	rq;	/* Receive Queue index */
 	u8	pro;	/* Layer 4 Protocol */
 	u16	reserved;
-	u16	vlctl;	/* VLAN control word */
+	__be16	vlctl;	/* VLAN control word */
 };
 
 struct gianfar_skb_cb {
@@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void)
 #endif
 }
 
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+	u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+	lstatus &= BD_LFLAG(TXBD_WRAP);
+	bdp->lstatus = cpu_to_be32(lstatus);
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
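
A note on the device-tree side of the series: flattened device tree cells are always stored big-endian, so dereferencing the raw pointer returned by of_get_property() only happens to give the right value on big-endian PowerPC. of_property_read_u32() converts the cell to CPU byte order, which is what makes the same code safe on the little-endian LS1021A. The sketch below is only a user-space model of that conversion; the helper name and the sample buffer are made up for illustration and are not kernel code.

/* Model of why of_property_read_u32() is the endian-safe way to read a
 * DT cell: the cell bytes in the blob are big-endian, and the helper
 * converts them to host order before handing the value back.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the conversion done inside the kernel helper. */
static uint32_t dt_cell_to_cpu(const uint8_t cell[4])
{
	return ((uint32_t)cell[0] << 24) | ((uint32_t)cell[1] << 16) |
	       ((uint32_t)cell[2] << 8)  |  (uint32_t)cell[3];
}

int main(void)
{
	/* "fsl,num_tx_queues = <8>;" as it would appear in the FDT blob */
	const uint8_t num_tx_queues_cell[4] = { 0x00, 0x00, 0x00, 0x08 };

	/* A raw dereference (the old of_get_property() pattern) would read
	 * 0x08000000 on a little-endian CPU; converting first gives 8 on
	 * any host, which is the behavior the driver now relies on.
	 */
	printf("num_tx_queues = %u\n",
	       (unsigned int)dt_cell_to_cpu(num_tx_queues_cell));
	return 0;
}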