author	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-25 15:25:22 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-25 15:25:22 +0400
commit	8a9ea3237e7eb5c25f09e429ad242ae5a3d5ea22 (patch)
tree	a0a63398a9983667d52cbbbf4e2405b4f22b1d83 /drivers/net/ethernet/freescale/fs_enet
parent	1be025d3cb40cd295123af2c394f7229ef9b30ca (diff)
parent	8b3408f8ee994973869d8ba32c5bf482bc4ddca4 (diff)
download	linux-8a9ea3237e7eb5c25f09e429ad242ae5a3d5ea22.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1745 commits)
dp83640: free packet queues on remove
dp83640: use proper function to free transmit time stamping packets
ipv6: Do not use routes from locally generated RAs
[PATCH net-next] tg3: add tx_dropped counter
be2net: don't create multiple RX/TX rings in multi channel mode
be2net: don't create multiple TXQs in BE2
be2net: refactor VF setup/teardown code into be_vf_setup/clear()
be2net: add vlan/rx-mode/flow-control config to be_setup()
net_sched: cls_flow: use skb_header_pointer()
ipv4: avoid useless call of the function check_peer_pmtu
TCP: remove TCP_DEBUG
net: Fix driver name for mdio-gpio.c
ipv4: tcp: fix TOS value in ACK messages sent from TIME_WAIT
rtnetlink: Add missing manual netlink notification in dev_change_net_namespaces
ipv4: fix ipsec forward performance regression
jme: fix irq storm after suspend/resume
route: fix ICMP redirect validation
net: hold sock reference while processing tx timestamps
tcp: md5: add more const attributes
Add ethtool -g support to virtio_net
...
Fix up conflicts in:
- drivers/net/Kconfig:
The split-up generated a trivial conflict with removal of a
stale reference to Documentation/networking/net-modules.txt.
Remove it from the new location instead.
- fs/sysfs/dir.c:
Fairly nasty conflicts with the sysfs rb-tree usage, conflicting
with Eric Biederman's changes for tagged directories.
Diffstat (limited to 'drivers/net/ethernet/freescale/fs_enet')
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/Kconfig	35
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/Makefile	14
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/fec.h	42
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c	1196
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/fs_enet.h	244
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/mac-fcc.c	584
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/mac-fec.c	498
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/mac-scc.c	484
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c	246
-rw-r--r--	drivers/net/ethernet/freescale/fs_enet/mii-fec.c	251
10 files changed, 3594 insertions, 0 deletions
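As a point of reference for the Kconfig symbols introduced below, a minimal .config fragment for building fs_enet as a module on a CPM2 (MPC82xx) platform could look like the sketch that follows. This is illustrative only and not part of the patch; the exact set of options depends on whether the target SoC is CPM1, CPM2, or MPC512x.

    CONFIG_NET_VENDOR_FREESCALE=y
    CONFIG_FS_ENET=m
    # On CPM2 both the SCC and FCC MACs are selectable; the FCC path pairs
    # with the bitbanged MDIO driver (mii-bitbang.c) built from FS_ENET_MDIO_FCC.
    CONFIG_FS_ENET_HAS_SCC=y
    CONFIG_FS_ENET_HAS_FCC=y
    CONFIG_FS_ENET_MDIO_FCC=m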
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig new file mode 100644 index 000000000000..268414d9f2cb --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -0,0 +1,35 @@ +config FS_ENET + tristate "Freescale Ethernet Driver" + depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) + select NET_CORE + select MII + select PHYLIB + +config FS_ENET_MPC5121_FEC + def_bool y if (FS_ENET && PPC_MPC512x) + select FS_ENET_HAS_FEC + +config FS_ENET_HAS_SCC + bool "Chip has an SCC usable for ethernet" + depends on FS_ENET && (CPM1 || CPM2) + default y + +config FS_ENET_HAS_FCC + bool "Chip has an FCC usable for ethernet" + depends on FS_ENET && CPM2 + default y + +config FS_ENET_HAS_FEC + bool "Chip has an FEC usable for ethernet" + depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) + select FS_ENET_MDIO_FEC + default y + +config FS_ENET_MDIO_FEC + tristate "MDIO driver for FEC" + depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) + +config FS_ENET_MDIO_FCC + tristate "MDIO driver for FCC" + depends on FS_ENET && CPM2 + select MDIO_BITBANG diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile new file mode 100644 index 000000000000..d4a305ee3455 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Freescale Ethernet controllers +# + +obj-$(CONFIG_FS_ENET) += fs_enet.o + +fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o +fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o +fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o + +obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o +obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o + +fs_enet-objs := fs_enet-main.o $(fs_enet-m) diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h new file mode 100644 index 000000000000..e980527e2b99 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fec.h @@ -0,0 +1,42 @@ +#ifndef FS_ENET_FEC_H +#define FS_ENET_FEC_H + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/* Interrupt events/masks. 
+*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + + + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 +#endif diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c new file mode 100644 index 000000000000..5bf5471f06ff --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -0,0 +1,1196 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> + * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/of_gpio.h> +#include <linux/of_net.h> + +#include <linux/vmalloc.h> +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "fs_enet.h" + +/*************************************************/ + +MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); +MODULE_DESCRIPTION("Freescale Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ +module_param(fs_enet_debug, int, 0); +MODULE_PARM_DESC(fs_enet_debug, + "Freescale bitmapped debugging message enable value"); + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev); +#endif + +static void fs_set_multicast_list(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + (*fep->ops->set_multicast_list)(dev); +} + +static void skb_align(struct sk_buff *skb, int align) +{ + int off = ((unsigned long)skb->data) & (align - 1); + + if (off) + skb_reserve(skb, align - off); +} + +/* NAPI receive function */ +static int fs_enet_rx_napi(struct napi_struct *napi, int budget) +{ + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); + struct net_device *dev = fep->ndev; + const struct fs_platform_info *fpi = fep->fpi; + cbd_t __iomem *bdp; + struct sk_buff *skb, *skbn, *skbt; + int received = 0; + u16 pkt_len, sc; + int curidx; + + /* + * First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + /* clear RX status bits for napi*/ + (*fep->ops->napi_clear_rx_event)(dev); + + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { + curidx = bdp - fep->rx_bd_base; + + /* + * Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((sc & BD_ENET_RX_LAST) == 0) + dev_warn(fep->dev, "rcv is not +last\n"); + + /* + * Check for errors. + */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { + fep->stats.rx_errors++; + /* Frame too long or too short. */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + fep->stats.rx_length_errors++; + /* Frame alignment */ + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + fep->stats.rx_frame_errors++; + /* CRC Error */ + if (sc & BD_ENET_RX_CR) + fep->stats.rx_crc_errors++; + /* FIFO overrun */ + if (sc & BD_ENET_RX_OV) + fep->stats.rx_crc_errors++; + + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + skbn = skb; + + } else { + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + /* + * Process the incoming frame. 
+ */ + fep->stats.rx_packets++; + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ + fep->stats.rx_bytes += pkt_len + 4; + + if (pkt_len <= fpi->rx_copybreak) { + /* +2 to make IP header L1 cache aligned */ + skbn = dev_alloc_skb(pkt_len + 2); + if (skbn != NULL) { + skb_reserve(skbn, 2); /* align IP header */ + skb_copy_from_linear_data(skb, + skbn->data, pkt_len); + /* swap */ + skbt = skb; + skb = skbn; + skbn = skbt; + } + } else { + skbn = dev_alloc_skb(ENET_RX_FRSIZE); + + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + + if (skbn != NULL) { + skb_put(skb, pkt_len); /* Make room */ + skb->protocol = eth_type_trans(skb, dev); + received++; + netif_receive_skb(skb); + } else { + dev_warn(fep->dev, + "Memory squeeze, dropping packet.\n"); + fep->stats.rx_dropped++; + skbn = skb; + } + } + + fep->rx_skbuff[curidx] = skbn; + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); + + /* + * Update BD pointer to next entry. + */ + if ((sc & BD_ENET_RX_WRAP) == 0) + bdp++; + else + bdp = fep->rx_bd_base; + + (*fep->ops->rx_bd_done)(dev); + + if (received >= budget) + break; + } + + fep->cur_rx = bdp; + + if (received < budget) { + /* done */ + napi_complete(napi); + (*fep->ops->napi_enable_rx)(dev); + } + return received; +} + +/* non NAPI receive function */ +static int fs_enet_rx_non_napi(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + cbd_t __iomem *bdp; + struct sk_buff *skb, *skbn, *skbt; + int received = 0; + u16 pkt_len, sc; + int curidx; + /* + * First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { + + curidx = bdp - fep->rx_bd_base; + + /* + * Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((sc & BD_ENET_RX_LAST) == 0) + dev_warn(fep->dev, "rcv is not +last\n"); + + /* + * Check for errors. + */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { + fep->stats.rx_errors++; + /* Frame too long or too short. */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + fep->stats.rx_length_errors++; + /* Frame alignment */ + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + fep->stats.rx_frame_errors++; + /* CRC Error */ + if (sc & BD_ENET_RX_CR) + fep->stats.rx_crc_errors++; + /* FIFO overrun */ + if (sc & BD_ENET_RX_OV) + fep->stats.rx_crc_errors++; + + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + skbn = skb; + + } else { + + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + /* + * Process the incoming frame. 
+ */ + fep->stats.rx_packets++; + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ + fep->stats.rx_bytes += pkt_len + 4; + + if (pkt_len <= fpi->rx_copybreak) { + /* +2 to make IP header L1 cache aligned */ + skbn = dev_alloc_skb(pkt_len + 2); + if (skbn != NULL) { + skb_reserve(skbn, 2); /* align IP header */ + skb_copy_from_linear_data(skb, + skbn->data, pkt_len); + /* swap */ + skbt = skb; + skb = skbn; + skbn = skbt; + } + } else { + skbn = dev_alloc_skb(ENET_RX_FRSIZE); + + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + + if (skbn != NULL) { + skb_put(skb, pkt_len); /* Make room */ + skb->protocol = eth_type_trans(skb, dev); + received++; + netif_rx(skb); + } else { + dev_warn(fep->dev, + "Memory squeeze, dropping packet.\n"); + fep->stats.rx_dropped++; + skbn = skb; + } + } + + fep->rx_skbuff[curidx] = skbn; + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); + + /* + * Update BD pointer to next entry. + */ + if ((sc & BD_ENET_RX_WRAP) == 0) + bdp++; + else + bdp = fep->rx_bd_base; + + (*fep->ops->rx_bd_done)(dev); + } + + fep->cur_rx = bdp; + + return 0; +} + +static void fs_enet_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + cbd_t __iomem *bdp; + struct sk_buff *skb; + int dirtyidx, do_wake, do_restart; + u16 sc; + + spin_lock(&fep->tx_lock); + bdp = fep->dirty_tx; + + do_wake = do_restart = 0; + while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { + dirtyidx = bdp - fep->tx_bd_base; + + if (fep->tx_free == fep->tx_ring) + break; + + skb = fep->tx_skbuff[dirtyidx]; + + /* + * Check for errors. + */ + if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { + + if (sc & BD_ENET_TX_HB) /* No heartbeat */ + fep->stats.tx_heartbeat_errors++; + if (sc & BD_ENET_TX_LC) /* Late collision */ + fep->stats.tx_window_errors++; + if (sc & BD_ENET_TX_RL) /* Retrans limit */ + fep->stats.tx_aborted_errors++; + if (sc & BD_ENET_TX_UN) /* Underrun */ + fep->stats.tx_fifo_errors++; + if (sc & BD_ENET_TX_CSL) /* Carrier lost */ + fep->stats.tx_carrier_errors++; + + if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { + fep->stats.tx_errors++; + do_restart = 1; + } + } else + fep->stats.tx_packets++; + + if (sc & BD_ENET_TX_READY) { + dev_warn(fep->dev, + "HEY! Enet xmit interrupt and TX_READY.\n"); + } + + /* + * Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (sc & BD_ENET_TX_DEF) + fep->stats.collisions++; + + /* unmap */ + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + skb->len, DMA_TO_DEVICE); + + /* + * Free the sk buffer associated with this last transmit. + */ + dev_kfree_skb_irq(skb); + fep->tx_skbuff[dirtyidx] = NULL; + + /* + * Update pointer to next buffer descriptor to be transmitted. + */ + if ((sc & BD_ENET_TX_WRAP) == 0) + bdp++; + else + bdp = fep->tx_bd_base; + + /* + * Since we have freed up a buffer, the ring is no longer + * full. + */ + if (!fep->tx_free++) + do_wake = 1; + } + + fep->dirty_tx = bdp; + + if (do_restart) + (*fep->ops->tx_restart)(dev); + + spin_unlock(&fep->tx_lock); + + if (do_wake) + netif_wake_queue(dev); +} + +/* + * The interrupt handler. + * This is called from the MPC core interrupt. 
+ */ +static irqreturn_t +fs_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct fs_enet_private *fep; + const struct fs_platform_info *fpi; + u32 int_events; + u32 int_clr_events; + int nr, napi_ok; + int handled; + + fep = netdev_priv(dev); + fpi = fep->fpi; + + nr = 0; + while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { + nr++; + + int_clr_events = int_events; + if (fpi->use_napi) + int_clr_events &= ~fep->ev_napi_rx; + + (*fep->ops->clear_int_events)(dev, int_clr_events); + + if (int_events & fep->ev_err) + (*fep->ops->ev_error)(dev, int_events); + + if (int_events & fep->ev_rx) { + if (!fpi->use_napi) + fs_enet_rx_non_napi(dev); + else { + napi_ok = napi_schedule_prep(&fep->napi); + + (*fep->ops->napi_disable_rx)(dev); + (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); + + /* NOTE: it is possible for FCCs in NAPI mode */ + /* to submit a spurious interrupt while in poll */ + if (napi_ok) + __napi_schedule(&fep->napi); + } + } + + if (int_events & fep->ev_tx) + fs_enet_tx(dev); + } + + handled = nr > 0; + return IRQ_RETVAL(handled); +} + +void fs_init_bds(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + cbd_t __iomem *bdp; + struct sk_buff *skb; + int i; + + fs_cleanup_bds(dev); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->tx_free = fep->tx_ring; + fep->cur_rx = fep->rx_bd_base; + + /* + * Initialize the receive buffer descriptors. + */ + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { + skb = dev_alloc_skb(ENET_RX_FRSIZE); + if (skb == NULL) { + dev_warn(fep->dev, + "Memory squeeze, unable to allocate skb\n"); + break; + } + skb_align(skb, ENET_RX_ALIGN); + fep->rx_skbuff[i] = skb; + CBDW_BUFADDR(bdp, + dma_map_single(fep->dev, skb->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); /* zero */ + CBDW_SC(bdp, BD_ENET_RX_EMPTY | + ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); + } + /* + * if we failed, fillup remainder + */ + for (; i < fep->rx_ring; i++, bdp++) { + fep->rx_skbuff[i] = NULL; + CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); + } + + /* + * ...and the same for transmit. + */ + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { + fep->tx_skbuff[i] = NULL; + CBDW_BUFADDR(bdp, 0); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); + } +} + +void fs_cleanup_bds(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct sk_buff *skb; + cbd_t __iomem *bdp; + int i; + + /* + * Reset SKB transmit buffers. + */ + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { + if ((skb = fep->tx_skbuff[i]) == NULL) + continue; + + /* unmap */ + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + skb->len, DMA_TO_DEVICE); + + fep->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + + /* + * Reset SKB receive buffers + */ + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { + if ((skb = fep->rx_skbuff[i]) == NULL) + continue; + + /* unmap */ + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + fep->rx_skbuff[i] = NULL; + + dev_kfree_skb(skb); + } +} + +/**********************************************************************************/ + +#ifdef CONFIG_FS_ENET_MPC5121_FEC +/* + * MPC5121 FEC requeries 4-byte alignment for TX data buffer! 
+ */ +static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, + struct sk_buff *skb) +{ + struct sk_buff *new_skb; + struct fs_enet_private *fep = netdev_priv(dev); + + /* Alloc new skb */ + new_skb = dev_alloc_skb(skb->len + 4); + if (!new_skb) { + if (net_ratelimit()) { + dev_warn(fep->dev, + "Memory squeeze, dropping tx packet.\n"); + } + return NULL; + } + + /* Make sure new skb is properly aligned */ + skb_align(new_skb, 4); + + /* Copy data to new skb ... */ + skb_copy_from_linear_data(skb, new_skb->data, skb->len); + skb_put(new_skb, skb->len); + + /* ... and free an old one */ + dev_kfree_skb_any(skb); + + return new_skb; +} +#endif + +static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + cbd_t __iomem *bdp; + int curidx; + u16 sc; + unsigned long flags; + +#ifdef CONFIG_FS_ENET_MPC5121_FEC + if (((unsigned long)skb->data) & 0x3) { + skb = tx_skb_align_workaround(dev, skb); + if (!skb) { + /* + * We have lost packet due to memory allocation error + * in tx_skb_align_workaround(). Hopefully original + * skb is still valid, so try transmit it later. + */ + return NETDEV_TX_BUSY; + } + } +#endif + spin_lock_irqsave(&fep->tx_lock, flags); + + /* + * Fill in a Tx ring entry + */ + bdp = fep->cur_tx; + + if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&fep->tx_lock, flags); + + /* + * Ooops. All transmit buffers are full. Bail out. + * This should not happen, since the tx queue should be stopped. + */ + dev_warn(fep->dev, "tx queue full!.\n"); + return NETDEV_TX_BUSY; + } + + curidx = bdp - fep->tx_bd_base; + /* + * Clear all of the status flags. + */ + CBDC_SC(bdp, BD_ENET_TX_STATS); + + /* + * Save skb pointer. + */ + fep->tx_skbuff[curidx] = skb; + + fep->stats.tx_bytes += skb->len; + + /* + * Push the data cache so the CPM does not get stale memory data. + */ + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, + skb->data, skb->len, DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, skb->len); + + /* + * If this was the last BD in the ring, start at the beginning again. 
+ */ + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + fep->cur_tx++; + else + fep->cur_tx = fep->tx_bd_base; + + if (!--fep->tx_free) + netif_stop_queue(dev); + + /* Trigger transmission start */ + sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | + BD_ENET_TX_LAST | BD_ENET_TX_TC; + + /* note that while FEC does not have this bit + * it marks it as available for software use + * yay for hw reuse :) */ + if (skb->len <= 60) + sc |= BD_ENET_TX_PAD; + CBDS_SC(bdp, sc); + + skb_tx_timestamp(skb); + + (*fep->ops->tx_kickstart)(dev); + + spin_unlock_irqrestore(&fep->tx_lock, flags); + + return NETDEV_TX_OK; +} + +static void fs_timeout(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int wake = 0; + + fep->stats.tx_errors++; + + spin_lock_irqsave(&fep->lock, flags); + + if (dev->flags & IFF_UP) { + phy_stop(fep->phydev); + (*fep->ops->stop)(dev); + (*fep->ops->restart)(dev); + phy_start(fep->phydev); + } + + phy_start(fep->phydev); + wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); + spin_unlock_irqrestore(&fep->lock, flags); + + if (wake) + netif_wake_queue(dev); +} + +/*----------------------------------------------------------------------------- + * generic link-change handler - should be sufficient for most cases + *-----------------------------------------------------------------------------*/ +static void generic_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex) { + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + } + + if (new_state) + fep->ops->restart(dev); + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); +} + + +static void fs_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&fep->lock, flags); + + if(fep->ops->adjust_link) + fep->ops->adjust_link(dev); + else + generic_adjust_link(dev); + + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fs_init_phy(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev; + + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + + phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!phydev) { + phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, + PHY_INTERFACE_MODE_MII); + } + if (!phydev) { + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; + } + + fep->phydev = phydev; + + return 0; +} + +static int fs_enet_open(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + int r; + int err; + + /* to initialize the fep->cur_rx,... */ + /* not doing this, will cause a crash in fs_enet_rx_napi */ + fs_init_bds(fep->ndev); + + if (fep->fpi->use_napi) + napi_enable(&fep->napi); + + /* Install our interrupt handler. 
*/ + r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, + "fs_enet-mac", dev); + if (r != 0) { + dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); + if (fep->fpi->use_napi) + napi_disable(&fep->napi); + return -EINVAL; + } + + err = fs_init_phy(dev); + if (err) { + free_irq(fep->interrupt, dev); + if (fep->fpi->use_napi) + napi_disable(&fep->napi); + return err; + } + phy_start(fep->phydev); + + netif_start_queue(dev); + + return 0; +} + +static int fs_enet_close(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + netif_carrier_off(dev); + if (fep->fpi->use_napi) + napi_disable(&fep->napi); + phy_stop(fep->phydev); + + spin_lock_irqsave(&fep->lock, flags); + spin_lock(&fep->tx_lock); + (*fep->ops->stop)(dev); + spin_unlock(&fep->tx_lock); + spin_unlock_irqrestore(&fep->lock, flags); + + /* release any irqs */ + phy_disconnect(fep->phydev); + fep->phydev = NULL; + free_irq(fep->interrupt, dev); + + return 0; +} + +static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + return &fep->stats; +} + +/*************************************************************************/ + +static void fs_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); +} + +static int fs_get_regs_len(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return (*fep->ops->get_regs_len)(dev); +} + +static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int r, len; + + len = regs->len; + + spin_lock_irqsave(&fep->lock, flags); + r = (*fep->ops->get_regs)(dev, p, &len); + spin_unlock_irqrestore(&fep->lock, flags); + + if (r == 0) + regs->version = 0; +} + +static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!fep->phydev) + return -ENODEV; + + return phy_ethtool_gset(fep->phydev, cmd); +} + +static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!fep->phydev) + return -ENODEV; + + return phy_ethtool_sset(fep->phydev, cmd); +} + +static int fs_nway_reset(struct net_device *dev) +{ + return 0; +} + +static u32 fs_get_msglevel(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + return fep->msg_enable; +} + +static void fs_set_msglevel(struct net_device *dev, u32 value) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fep->msg_enable = value; +} + +static const struct ethtool_ops fs_ethtool_ops = { + .get_drvinfo = fs_get_drvinfo, + .get_regs_len = fs_get_regs_len, + .get_settings = fs_get_settings, + .set_settings = fs_set_settings, + .nway_reset = fs_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = fs_get_msglevel, + .set_msglevel = fs_set_msglevel, + .get_regs = fs_get_regs, +}; + +static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return phy_mii_ioctl(fep->phydev, rq, cmd); +} + +extern int fs_mii_connect(struct net_device *dev); +extern void fs_mii_disconnect(struct net_device *dev); + +/**************************************************************************************/ + +#ifdef 
CONFIG_FS_ENET_HAS_FEC +#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#else +#define IS_FEC(match) 0 +#endif + +static const struct net_device_ops fs_enet_netdev_ops = { + .ndo_open = fs_enet_open, + .ndo_stop = fs_enet_close, + .ndo_get_stats = fs_enet_get_stats, + .ndo_start_xmit = fs_enet_start_xmit, + .ndo_tx_timeout = fs_timeout, + .ndo_set_rx_mode = fs_set_multicast_list, + .ndo_do_ioctl = fs_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fs_enet_netpoll, +#endif +}; + +static struct of_device_id fs_enet_match[]; +static int __devinit fs_enet_probe(struct platform_device *ofdev) +{ + const struct of_device_id *match; + struct net_device *ndev; + struct fs_enet_private *fep; + struct fs_platform_info *fpi; + const u32 *data; + const u8 *mac_addr; + int privsize, len, ret = -ENODEV; + + match = of_match_device(fs_enet_match, &ofdev->dev); + if (!match) + return -EINVAL; + + fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); + if (!fpi) + return -ENOMEM; + + if (!IS_FEC(match)) { + data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); + if (!data || len != 4) + goto out_free_fpi; + + fpi->cp_command = *data; + } + + fpi->rx_ring = 32; + fpi->tx_ring = 32; + fpi->rx_copybreak = 240; + fpi->use_napi = 1; + fpi->napi_weight = 17; + fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); + if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", + NULL))) + goto out_free_fpi; + + privsize = sizeof(*fep) + + sizeof(struct sk_buff **) * + (fpi->rx_ring + fpi->tx_ring); + + ndev = alloc_etherdev(privsize); + if (!ndev) { + ret = -ENOMEM; + goto out_put; + } + + SET_NETDEV_DEV(ndev, &ofdev->dev); + dev_set_drvdata(&ofdev->dev, ndev); + + fep = netdev_priv(ndev); + fep->dev = &ofdev->dev; + fep->ndev = ndev; + fep->fpi = fpi; + fep->ops = match->data; + + ret = fep->ops->setup_data(ndev); + if (ret) + goto out_free_dev; + + fep->rx_skbuff = (struct sk_buff **)&fep[1]; + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + + mac_addr = of_get_mac_address(ofdev->dev.of_node); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, 6); + + ret = fep->ops->allocate_bd(ndev); + if (ret) + goto out_cleanup_data; + + fep->rx_bd_base = fep->ring_base; + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; + + fep->tx_ring = fpi->tx_ring; + fep->rx_ring = fpi->rx_ring; + + ndev->netdev_ops = &fs_enet_netdev_ops; + ndev->watchdog_timeo = 2 * HZ; + if (fpi->use_napi) + netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, + fpi->napi_weight); + + ndev->ethtool_ops = &fs_ethtool_ops; + + init_timer(&fep->phy_timer_list); + + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) + goto out_free_bd; + + pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr); + + return 0; + +out_free_bd: + fep->ops->free_bd(ndev); +out_cleanup_data: + fep->ops->cleanup_data(ndev); +out_free_dev: + free_netdev(ndev); + dev_set_drvdata(&ofdev->dev, NULL); +out_put: + of_node_put(fpi->phy_node); +out_free_fpi: + kfree(fpi); + return ret; +} + +static int fs_enet_remove(struct platform_device *ofdev) +{ + struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct fs_enet_private *fep = netdev_priv(ndev); + + unregister_netdev(ndev); + + fep->ops->free_bd(ndev); + fep->ops->cleanup_data(ndev); + dev_set_drvdata(fep->dev, NULL); + of_node_put(fep->fpi->phy_node); + 
free_netdev(ndev); + return 0; +} + +static struct of_device_id fs_enet_match[] = { +#ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", + .data = (void *)&fs_scc_ops, + }, + { + .compatible = "fsl,cpm2-scc-enet", + .data = (void *)&fs_scc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FCC + { + .compatible = "fsl,cpm2-fcc-enet", + .data = (void *)&fs_fcc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FEC +#ifdef CONFIG_FS_ENET_MPC5121_FEC + { + .compatible = "fsl,mpc5121-fec", + .data = (void *)&fs_fec_ops, + }, +#else + { + .compatible = "fsl,pq1-fec-enet", + .data = (void *)&fs_fec_ops, + }, +#endif +#endif + {} +}; +MODULE_DEVICE_TABLE(of, fs_enet_match); + +static struct platform_driver fs_enet_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "fs_enet", + .of_match_table = fs_enet_match, + }, + .probe = fs_enet_probe, + .remove = fs_enet_remove, +}; + +static int __init fs_init(void) +{ + return platform_driver_register(&fs_enet_driver); +} + +static void __exit fs_cleanup(void) +{ + platform_driver_unregister(&fs_enet_driver); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/**************************************************************************************/ + +module_init(fs_init); +module_exit(fs_cleanup); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h new file mode 100644 index 000000000000..1ece4b1a689e --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -0,0 +1,244 @@ +#ifndef FS_ENET_H +#define FS_ENET_H + +#include <linux/mii.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/phy.h> +#include <linux/dma-mapping.h> + +#include <linux/fs_enet_pd.h> +#include <asm/fs_pd.h> + +#ifdef CONFIG_CPM1 +#include <asm/cpm1.h> +#endif + +#if defined(CONFIG_FS_ENET_HAS_FEC) +#include <asm/cpm.h> + +#if defined(CONFIG_FS_ENET_MPC5121_FEC) +/* MPC5121 FEC has different register layout */ +struct fec { + u32 fec_reserved0; + u32 fec_ievent; /* Interrupt event reg */ + u32 fec_imask; /* Interrupt mask reg */ + u32 fec_reserved1; + u32 fec_r_des_active; /* Receive descriptor reg */ + u32 fec_x_des_active; /* Transmit descriptor reg */ + u32 fec_reserved2[3]; + u32 fec_ecntrl; /* Ethernet control reg */ + u32 fec_reserved3[6]; + u32 fec_mii_data; /* MII manage frame reg */ + u32 fec_mii_speed; /* MII speed control reg */ + u32 fec_reserved4[7]; + u32 fec_mib_ctrlstat; /* MIB control/status reg */ + u32 fec_reserved5[7]; + u32 fec_r_cntrl; /* Receive control reg */ + u32 fec_reserved6[15]; + u32 fec_x_cntrl; /* Transmit Control reg */ + u32 fec_reserved7[7]; + u32 fec_addr_low; /* Low 32bits MAC address */ + u32 fec_addr_high; /* High 16bits MAC address */ + u32 fec_opd; /* Opcode + Pause duration */ + u32 fec_reserved8[10]; + u32 fec_hash_table_high; /* High 32bits hash table */ + u32 fec_hash_table_low; /* Low 32bits hash table */ + u32 fec_grp_hash_table_high; /* High 32bits hash table */ + u32 fec_grp_hash_table_low; /* Low 32bits hash table */ + u32 fec_reserved9[7]; + u32 fec_x_wmrk; /* FIFO transmit water mark */ + u32 fec_reserved10; + u32 fec_r_bound; /* FIFO receive bound reg */ + u32 fec_r_fstart; /* FIFO receive start reg */ + u32 fec_reserved11[11]; + u32 fec_r_des_start; /* Receive descriptor ring */ + u32 fec_x_des_start; /* Transmit descriptor ring */ + u32 fec_r_buff_size; /* 
Maximum receive buff size */ + u32 fec_reserved12[26]; + u32 fec_dma_control; /* DMA Endian and other ctrl */ +}; +#endif + +struct fec_info { + struct fec __iomem *fecp; + u32 mii_speed; +}; +#endif + +#ifdef CONFIG_CPM2 +#include <asm/cpm2.h> +#endif + +/* hw driver ops */ +struct fs_ops { + int (*setup_data)(struct net_device *dev); + int (*allocate_bd)(struct net_device *dev); + void (*free_bd)(struct net_device *dev); + void (*cleanup_data)(struct net_device *dev); + void (*set_multicast_list)(struct net_device *dev); + void (*adjust_link)(struct net_device *dev); + void (*restart)(struct net_device *dev); + void (*stop)(struct net_device *dev); + void (*napi_clear_rx_event)(struct net_device *dev); + void (*napi_enable_rx)(struct net_device *dev); + void (*napi_disable_rx)(struct net_device *dev); + void (*rx_bd_done)(struct net_device *dev); + void (*tx_kickstart)(struct net_device *dev); + u32 (*get_int_events)(struct net_device *dev); + void (*clear_int_events)(struct net_device *dev, u32 int_events); + void (*ev_error)(struct net_device *dev, u32 int_events); + int (*get_regs)(struct net_device *dev, void *p, int *sizep); + int (*get_regs_len)(struct net_device *dev); + void (*tx_restart)(struct net_device *dev); +}; + +struct phy_info { + unsigned int id; + const char *name; + void (*startup) (struct net_device * dev); + void (*shutdown) (struct net_device * dev); + void (*ack_int) (struct net_device * dev); +}; + +/* The FEC stores dest/src/type, data, and checksum for receive packets. + */ +#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ +#define MIN_MTU 46 /* this is data size */ +#define CRC_LEN 4 + +#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) +#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) + +/* Must be a multiple of 32 (to cover both FEC & FCC) */ +#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) +/* This is needed so that invalidate_xxx wont invalidate too much */ +#define ENET_RX_ALIGN 16 +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) + +struct fs_enet_private { + struct napi_struct napi; + struct device *dev; /* pointer back to the device (must be initialized first) */ + struct net_device *ndev; + spinlock_t lock; /* during all ops except TX pckt processing */ + spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ + struct fs_platform_info *fpi; + const struct fs_ops *ops; + int rx_ring, tx_ring; + dma_addr_t ring_mem_addr; + void __iomem *ring_base; + struct sk_buff **rx_skbuff; + struct sk_buff **tx_skbuff; + cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t __iomem *tx_bd_base; + cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. 
*/ + cbd_t __iomem *cur_rx; + cbd_t __iomem *cur_tx; + int tx_free; + struct net_device_stats stats; + struct timer_list phy_timer_list; + const struct phy_info *phy; + u32 msg_enable; + struct mii_if_info mii_if; + unsigned int last_mii_status; + int interrupt; + + struct phy_device *phydev; + int oldduplex, oldspeed, oldlink; /* current settings */ + + /* event masks */ + u32 ev_napi_rx; /* mask of NAPI rx events */ + u32 ev_rx; /* rx event mask */ + u32 ev_tx; /* tx event mask */ + u32 ev_err; /* error event mask */ + + u16 bd_rx_empty; /* mask of BD rx empty */ + u16 bd_rx_err; /* mask of BD rx errors */ + + union { + struct { + int idx; /* FEC1 = 0, FEC2 = 1 */ + void __iomem *fecp; /* hw registers */ + u32 hthi, htlo; /* state for multicast */ + } fec; + + struct { + int idx; /* FCC1-3 = 0-2 */ + void __iomem *fccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + void __iomem *fcccp; /* hw registers cont. */ + void __iomem *mem; /* FCC DPRAM */ + u32 gaddrh, gaddrl; /* group address */ + } fcc; + + struct { + int idx; /* FEC1 = 0, FEC2 = 1 */ + void __iomem *sccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + u32 hthi, htlo; /* state for multicast */ + } scc; + + }; +}; + +/***************************************************************************/ + +void fs_init_bds(struct net_device *dev); +void fs_cleanup_bds(struct net_device *dev); + +/***************************************************************************/ + +#define DRV_MODULE_NAME "fs_enet" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "Aug 8, 2005" + +/***************************************************************************/ + +int fs_enet_platform_init(void); +void fs_enet_platform_cleanup(void); + +/***************************************************************************/ +/* buffer descriptor access macros */ + +/* access macros */ +#if defined(CONFIG_CPM1) +/* for a a CPM1 __raw_xxx's are sufficient */ +#define __cbd_out32(addr, x) __raw_writel(x, addr) +#define __cbd_out16(addr, x) __raw_writew(x, addr) +#define __cbd_in32(addr) __raw_readl(addr) +#define __cbd_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __cbd_out32(addr, x) out_be32(addr, x) +#define __cbd_out16(addr, x) out_be16(addr, x) +#define __cbd_in32(addr) in_be32(addr) +#define __cbd_in16(addr) in_be16(addr) +#endif + +/* write */ +#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc)) +#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen)) +#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) + +/* read */ +#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc) +#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen) +#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr) + +/* set bits */ +#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) + +/* clear bits */ +#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) + +/*******************************************************************/ + +extern const struct fs_ops fs_fec_ops; +extern const struct fs_ops fs_fcc_ops; +extern const struct fs_ops fs_scc_ops; + +/*******************************************************************/ + +#endif diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c new file mode 100644 index 000000000000..7583a9572bcc --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -0,0 +1,584 @@ +/* + * FCC 
driver for Motorola MPC82xx (PQ2). + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/of_device.h> +#include <linux/gfp.h> + +#include <asm/immap_cpm2.h> +#include <asm/mpc8260.h> +#include <asm/cpm2.h> + +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "fs_enet.h" + +/*************************************************/ + +/* FCC access macros */ + +/* write, read, set bits, clear bits */ +#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) +#define R32(_p, _m) in_be32(&(_p)->_m) +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) + +#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) +#define R16(_p, _m) in_be16(&(_p)->_m) +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) + +#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) +#define R8(_p, _m) in_8(&(_p)->_m) +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) + +/*************************************************/ + +#define FCC_MAX_MULTICAST_ADDRS 64 + +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define MAX_CR_CMD_LOOPS 10000 + +static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) +{ + const struct fs_platform_info *fpi = fep->fpi; + + return cpm_command(fpi->cp_command, op); +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + struct fs_platform_info *fpi = fep->fpi; + int ret = -EINVAL; + + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + if (fep->interrupt == NO_IRQ) + goto out; + + fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->fcc.fccp) + goto out; + + fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); + if (!fep->fcc.ep) + goto out_fccp; + + fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2); + if (!fep->fcc.fcccp) + goto out_ep; + + fep->fcc.mem = (void __iomem *)cpm2_immr; + fpi->dpram_offset = cpm_dpalloc(128, 32); + if (IS_ERR_VALUE(fpi->dpram_offset)) { + ret = fpi->dpram_offset; + goto out_fcccp; + } + + return 0; + +out_fcccp: + iounmap(fep->fcc.fcccp); +out_ep: + iounmap(fep->fcc.ep); +out_fccp: + iounmap(fep->fcc.fccp); +out: + return ret; +} + +#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) +#define FCC_RX_EVENT (FCC_ENET_RXF) +#define FCC_TX_EVENT (FCC_ENET_TXB) +#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = 
netdev_priv(dev); + + if (do_pd_setup(fep) != 0) + return -EINVAL; + + fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; + fep->ev_rx = FCC_RX_EVENT; + fep->ev_tx = FCC_TX_EVENT; + fep->ev_err = FCC_ERR_EVENT_MSK; + + return 0; +} + +static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), &fep->ring_mem_addr, + GFP_KERNEL); + if (fep->ring_base == NULL) + return -ENOMEM; + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + if (fep->ring_base) + dma_free_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), + (void __force *)fep->ring_base, fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_enet_t __iomem *ep = fep->fcc.ep; + + W32(ep, fen_gaddrh, 0); + W32(ep, fen_gaddrl, 0); +} + +static void set_multicast_one(struct net_device *dev, const u8 *mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_enet_t __iomem *ep = fep->fcc.ep; + u16 taddrh, taddrm, taddrl; + + taddrh = ((u16)mac[5] << 8) | mac[4]; + taddrm = ((u16)mac[3] << 8) | mac[2]; + taddrl = ((u16)mac[1] << 8) | mac[0]; + + W16(ep, fen_taddrh, taddrh); + W16(ep, fen_taddrm, taddrm); + W16(ep, fen_taddrl, taddrl); + fcc_cr_cmd(fep, CPM_CR_SET_GADDR); +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; + + /* clear promiscuous always */ + C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) { + + W32(ep, fen_gaddrh, 0xffffffff); + W32(ep, fen_gaddrl, 0xffffffff); + } + + /* read back */ + fep->fcc.gaddrh = R32(ep, fen_gaddrh); + fep->fcc.gaddrl = R32(ep, fen_gaddrl); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_c_t __iomem *fcccp = fep->fcc.fcccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; + dma_addr_t rx_bd_base_phys, tx_bd_base_phys; + u16 paddrh, paddrm, paddrl; + const unsigned char *mac; + int i; + + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); + + /* clear everything (slow & steady does it) */ + for (i = 0; i < sizeof(*ep); i++) + out_8((u8 __iomem *)ep + i, 0); + + /* get physical address */ + rx_bd_base_phys = fep->ring_mem_addr; + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; + + /* point to bds */ + W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys); + W32(ep, fen_genfcc.fcc_tbase, 
tx_bd_base_phys); + + /* Set maximum bytes per receive buffer. + * It must be a multiple of 32. + */ + W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE); + + W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24); + W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24); + + /* Allocate space in the reserved FCC area of DPRAM for the + * internal buffers. No one uses this space (yet), so we + * can do this. Later, we will add resource management for + * this area. + */ + + W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); + W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); + + W16(ep, fen_padptr, fpi->dpram_offset + 64); + + /* fill with special symbol... */ + memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); + + W32(ep, fen_genfcc.fcc_rbptr, 0); + W32(ep, fen_genfcc.fcc_tbptr, 0); + W32(ep, fen_genfcc.fcc_rcrc, 0); + W32(ep, fen_genfcc.fcc_tcrc, 0); + W16(ep, fen_genfcc.fcc_res1, 0); + W32(ep, fen_genfcc.fcc_res2, 0); + + /* no CAM */ + W32(ep, fen_camptr, 0); + + /* Set CRC preset and mask */ + W32(ep, fen_cmask, 0xdebb20e3); + W32(ep, fen_cpres, 0xffffffff); + + W32(ep, fen_crcec, 0); /* CRC Error counter */ + W32(ep, fen_alec, 0); /* alignment error counter */ + W32(ep, fen_disfc, 0); /* discard frame counter */ + W16(ep, fen_retlim, 15); /* Retry limit threshold */ + W16(ep, fen_pper, 0); /* Normal persistence */ + + /* set group address */ + W32(ep, fen_gaddrh, fep->fcc.gaddrh); + W32(ep, fen_gaddrl, fep->fcc.gaddrh); + + /* Clear hash filter tables */ + W32(ep, fen_iaddrh, 0); + W32(ep, fen_iaddrl, 0); + + /* Clear the Out-of-sequence TxBD */ + W16(ep, fen_tfcstat, 0); + W16(ep, fen_tfclen, 0); + W32(ep, fen_tfcptr, 0); + + W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */ + W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ + + /* set address */ + mac = dev->dev_addr; + paddrh = ((u16)mac[5] << 8) | mac[4]; + paddrm = ((u16)mac[3] << 8) | mac[2]; + paddrl = ((u16)mac[1] << 8) | mac[0]; + + W16(ep, fen_paddrh, paddrh); + W16(ep, fen_paddrm, paddrm); + W16(ep, fen_paddrl, paddrl); + + W16(ep, fen_taddrh, 0); + W16(ep, fen_taddrm, 0); + W16(ep, fen_taddrl, 0); + + W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */ + W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */ + + /* Clear stat counters, in case we ever enable RMON */ + W32(ep, fen_octc, 0); + W32(ep, fen_colc, 0); + W32(ep, fen_broc, 0); + W32(ep, fen_mulc, 0); + W32(ep, fen_uspc, 0); + W32(ep, fen_frgc, 0); + W32(ep, fen_ospc, 0); + W32(ep, fen_jbrc, 0); + W32(ep, fen_p64c, 0); + W32(ep, fen_p65c, 0); + W32(ep, fen_p128c, 0); + W32(ep, fen_p256c, 0); + W32(ep, fen_p512c, 0); + W32(ep, fen_p1024c, 0); + + W16(ep, fen_rfthr, 0); /* Suggested by manual */ + W16(ep, fen_rfcnt, 0); + W16(ep, fen_cftype, 0); + + fs_init_bds(dev); + + /* adjust to speed (for RMII mode) */ + if (fpi->use_rmii) { + if (fep->phydev->speed == 100) + C8(fcccp, fcc_gfemr, 0x20); + else + S8(fcccp, fcc_gfemr, 0x20); + } + + fcc_cr_cmd(fep, CPM_CR_INIT_TRX); + + /* clear events */ + W16(fccp, fcc_fcce, 0xffff); + + /* Enable interrupts we wish to service */ + W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); + + /* Set GFMR to enable Ethernet operating mode */ + W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); + + /* set sync/delimiters */ + W16(fccp, fcc_fdsr, 0xd555); + + W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); + + if (fpi->use_rmii) + S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); + + /* adjust to duplex mode */ + if (fep->phydev->duplex) + S32(fccp, fcc_fpsmr, 
FCC_PSMR_FDE | FCC_PSMR_LPB); + else + C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); + + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + + S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + /* stop ethernet */ + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); + + /* clear events */ + W16(fccp, fcc_fcce, 0xffff); + + /* clear interrupt mask */ + W16(fccp, fcc_fccm, 0); + + fs_cleanup_bds(dev); +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + /* nothing */ +} + +static void tx_kickstart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_ftodr, 0x8000); +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + return (u32)R16(fccp, fcc_fcce); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, int_events & 0xffff); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1) + return -EINVAL; + + memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); + p = (char *)p + sizeof(fcc_t); + + memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); + p = (char *)p + sizeof(fcc_enet_t); + + memcpy_fromio(p, fep->fcc.fcccp, 1); + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1; +} + +/* Some transmit errors cause the transmitter to shut + * down. We now issue a restart transmit. + * Also, to workaround 8260 device erratum CPM37, we must + * disable and then re-enable the transmitterfollowing a + * Late Collision, Underrun, or Retry Limit error. + * In addition, tbptr may point beyond BDs beyond still marked + * as ready due to internal pipelining, so we need to look back + * through the BDs and adjust tbptr to point to the last BD + * marked as ready. This may result in some buffers being + * retransmitted. 
+ */ +static void tx_restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + const struct fs_platform_info *fpi = fep->fpi; + fcc_enet_t __iomem *ep = fep->fcc.ep; + cbd_t __iomem *curr_tbptr; + cbd_t __iomem *recheck_bd; + cbd_t __iomem *prev_bd; + cbd_t __iomem *last_tx_bd; + + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); + + /* get the current bd held in TBPTR and scan back from this point */ + recheck_bd = curr_tbptr = (cbd_t __iomem *) + ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + + fep->ring_base); + + prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; + + /* Move through the bds in reverse, look for the earliest buffer + * that is not ready. Adjust TBPTR to the following buffer */ + while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) { + /* Go back one buffer */ + recheck_bd = prev_bd; + + /* update the previous buffer */ + prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1; + + /* We should never see all bds marked as ready, check anyway */ + if (recheck_bd == curr_tbptr) + break; + } + /* Now update the TBPTR and dirty flag to the current buffer */ + W32(ep, fen_genfcc.fcc_tbptr, + (uint) (((void *)recheck_bd - fep->ring_base) + + fep->ring_mem_addr)); + fep->dirty_tx = recheck_bd; + + C32(fccp, fcc_gfmr, FCC_GFMR_ENT); + udelay(10); + S32(fccp, fcc_gfmr, FCC_GFMR_ENT); + + fcc_cr_cmd(fep, CPM_CR_RESTART_TX); +} + +/*************************************************************************/ + +const struct fs_ops fs_fcc_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + .stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = allocate_bd, + .free_bd = free_bd, +}; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c new file mode 100644 index 000000000000..b9fbc83d64a7 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -0,0 +1,498 @@ +/* + * Freescale Ethernet controllers + * + * Copyright (c) 2005 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/gfp.h> + +#include <asm/irq.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_8xx +#include <asm/8xx_immap.h> +#include <asm/pgtable.h> +#include <asm/mpc8xx.h> +#include <asm/cpm1.h> +#endif + +#include "fs_enet.h" +#include "fec.h" + +/*************************************************/ + +#if defined(CONFIG_CPM1) +/* for a CPM1 __raw_xxx's are sufficient */ +#define __fs_out32(addr, x) __raw_writel(x, addr) +#define __fs_out16(addr, x) __raw_writew(x, addr) +#define __fs_in32(addr) __raw_readl(addr) +#define __fs_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __fs_out32(addr, x) out_be32(addr, x) +#define __fs_out16(addr, x) out_be16(addr, x) +#define __fs_in32(addr) in_be32(addr) +#define __fs_in16(addr) in_be16(addr) +#endif + +/* write */ +#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v)) + +/* read */ +#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg) + +/* set bits */ +#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) + +/* clear bits */ +#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 + +static int whack_reset(struct fec __iomem *fecp) +{ + int i; + + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); + for (i = 0; i < FEC_RESET_DELAY; i++) { + if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0) + return 0; /* OK */ + udelay(1); + } + + return -1; +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->fec.fecp) + return -EINVAL; + + return 0; +} + +#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) +#define FEC_RX_EVENT (FEC_ENET_RXF) +#define FEC_TX_EVENT (FEC_ENET_TXF) +#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ + FEC_ENET_BABT | FEC_ENET_EBERR) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (do_pd_setup(fep) != 0) + return -EINVAL; + + fep->fec.hthi = 0; + fep->fec.htlo = 0; + + fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; + fep->ev_rx = FEC_RX_EVENT; + fep->ev_tx = FEC_TX_EVENT; + fep->ev_err = FEC_ERR_EVENT_MSK; + + return 0; +} + +static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), &fep->ring_mem_addr, + GFP_KERNEL); + if (fep->ring_base == NULL) + return -ENOMEM; + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + if (fep->ring_base) + 
dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) + * sizeof(cbd_t), + (void __force *)fep->ring_base, + fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FS(fecp, r_cntrl, FEC_RCNTRL_PROM); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + fep->fec.hthi = 0; + fep->fec.htlo = 0; +} + +static void set_multicast_one(struct net_device *dev, const u8 *mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + int temp, hash_index, i, j; + u32 crc, csrVal; + u8 byte, msb; + + crc = 0xffffffff; + for (i = 0; i < 6; i++) { + byte = mac[i]; + for (j = 0; j < 8; j++) { + msb = crc >> 31; + crc <<= 1; + if (msb ^ (byte & 0x1)) + crc ^= FEC_CRC_POLY; + byte >>= 1; + } + } + + temp = (crc & 0x3f) >> 1; + hash_index = ((temp & 0x01) << 4) | + ((temp & 0x02) << 2) | + ((temp & 0x04)) | + ((temp & 0x08) >> 2) | + ((temp & 0x10) >> 4); + csrVal = 1 << hash_index; + if (crc & 1) + fep->fec.hthi |= csrVal; + else + fep->fec.htlo |= csrVal; +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) { + fep->fec.hthi = 0xffffffffU; + fep->fec.htlo = 0xffffffffU; + } + + FC(fecp, r_cntrl, FEC_RCNTRL_PROM); + FW(fecp, grp_hash_table_high, fep->fec.hthi); + FW(fecp, grp_hash_table_low, fep->fec.htlo); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + const struct fs_platform_info *fpi = fep->fpi; + dma_addr_t rx_bd_base_phys, tx_bd_base_phys; + int r; + u32 addrhi, addrlo; + + struct mii_bus* mii = fep->phydev->bus; + struct fec_info* fec_inf = mii->priv; + + r = whack_reset(fep->fec.fecp); + if (r != 0) + dev_err(fep->dev, "FEC Reset FAILED!\n"); + /* + * Set station address. + */ + addrhi = ((u32) dev->dev_addr[0] << 24) | + ((u32) dev->dev_addr[1] << 16) | + ((u32) dev->dev_addr[2] << 8) | + (u32) dev->dev_addr[3]; + addrlo = ((u32) dev->dev_addr[4] << 24) | + ((u32) dev->dev_addr[5] << 16); + FW(fecp, addr_low, addrhi); + FW(fecp, addr_high, addrlo); + + /* + * Reset all multicast. + */ + FW(fecp, grp_hash_table_high, fep->fec.hthi); + FW(fecp, grp_hash_table_low, fep->fec.htlo); + + /* + * Set maximum receive buffer size. + */ + FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); +#ifdef CONFIG_FS_ENET_MPC5121_FEC + FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16); +#else + FW(fecp, r_hash, PKT_MAXBUF_SIZE); +#endif + + /* get physical address */ + rx_bd_base_phys = fep->ring_mem_addr; + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; + + /* + * Set receive and transmit descriptor base. + */ + FW(fecp, r_des_start, rx_bd_base_phys); + FW(fecp, x_des_start, tx_bd_base_phys); + + fs_init_bds(dev); + + /* + * Enable big endian and don't care about SDMA FC. 
+ */ +#ifdef CONFIG_FS_ENET_MPC5121_FEC + FS(fecp, dma_control, 0xC0000000); +#else + FW(fecp, fun_code, 0x78000000); +#endif + + /* + * Set MII speed. + */ + FW(fecp, mii_speed, fec_inf->mii_speed); + + /* + * Clear any outstanding interrupt. + */ + FW(fecp, ievent, 0xffc0); +#ifndef CONFIG_FS_ENET_MPC5121_FEC + FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); + + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ +#else + /* + * Only set MII mode - do not touch maximum frame length + * configured before. + */ + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); +#endif + /* + * adjust to duplex mode + */ + if (fep->phydev->duplex) { + FC(fecp, r_cntrl, FEC_RCNTRL_DRT); + FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ + } else { + FS(fecp, r_cntrl, FEC_RCNTRL_DRT); + FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ + } + + /* + * Enable interrupts we wish to service. + */ + FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | + FEC_ENET_RXF | FEC_ENET_RXB); + + /* + * And last, enable the transmit and receive processing. + */ + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, r_des_active, 0x01000000); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + struct fec __iomem *fecp = fep->fec.fecp; + + struct fec_info* feci= fep->phydev->bus->priv; + + int i; + + if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) + return; /* already down */ + + FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ + for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && + i < FEC_RESET_DELAY; i++) + udelay(1); + + if (i == FEC_RESET_DELAY) + dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n"); + /* + * Disable FEC. Let only MII interrupts. + */ + FW(fecp, imask, 0); + FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); + + fs_cleanup_bds(dev); + + /* shut down FEC1? 
that's where the mii bus is */ + if (fpi->has_phy) { + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, ievent, FEC_ENET_MII); + FW(fecp, mii_speed, feci->mii_speed); + } +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, r_des_active, 0x01000000); +} + +static void tx_kickstart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, x_des_active, 0x01000000); +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + return FR(fecp, ievent) & FR(fecp, imask); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, int_events); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(struct fec)) + return -EINVAL; + + memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec)); + + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(struct fec); +} + +static void tx_restart(struct net_device *dev) +{ + /* nothing */ +} + +/*************************************************************************/ + +const struct fs_ops fs_fec_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + .stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = allocate_bd, + .free_bd = free_bd, +}; + diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c new file mode 100644 index 000000000000..22a02a767069 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -0,0 +1,484 @@ +/* + * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> + +#include <asm/irq.h> +#include <asm/uaccess.h> + +#ifdef CONFIG_8xx +#include <asm/8xx_immap.h> +#include <asm/pgtable.h> +#include <asm/mpc8xx.h> +#include <asm/cpm1.h> +#endif + +#include "fs_enet.h" + +/*************************************************/ +#if defined(CONFIG_CPM1) +/* for a 8xx __raw_xxx's are sufficient */ +#define __fs_out32(addr, x) __raw_writel(x, addr) +#define __fs_out16(addr, x) __raw_writew(x, addr) +#define __fs_out8(addr, x) __raw_writeb(x, addr) +#define __fs_in32(addr) __raw_readl(addr) +#define __fs_in16(addr) __raw_readw(addr) +#define __fs_in8(addr) __raw_readb(addr) +#else +/* for others play it safe */ +#define __fs_out32(addr, x) out_be32(addr, x) +#define __fs_out16(addr, x) out_be16(addr, x) +#define __fs_in32(addr) in_be32(addr) +#define __fs_in16(addr) in_be16(addr) +#define __fs_out8(addr, x) out_8(addr, x) +#define __fs_in8(addr) in_8(addr) +#endif + +/* write, read, set bits, clear bits */ +#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v)) +#define R32(_p, _m) __fs_in32(&(_p)->_m) +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) + +#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v)) +#define R16(_p, _m) __fs_in16(&(_p)->_m) +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) + +#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v)) +#define R8(_p, _m) __fs_in8(&(_p)->_m) +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) + +#define SCC_MAX_MULTICAST_ADDRS 64 + +/* + * Delay to wait for SCC reset command to complete (in us) + */ +#define SCC_RESET_DELAY 50 + +static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) +{ + const struct fs_platform_info *fpi = fep->fpi; + + return cpm_command(fpi->cp_command, op); +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->scc.sccp) + return -EINVAL; + + fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); + if (!fep->scc.ep) { + iounmap(fep->scc.sccp); + return -EINVAL; + } + + return 0; +} + +#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) +#define SCC_RX_EVENT (SCCE_ENET_RXF) +#define SCC_TX_EVENT (SCCE_ENET_TXB) +#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + do_pd_setup(fep); + + fep->scc.hthi = 0; + fep->scc.htlo = 0; + + fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; + fep->ev_rx = SCC_RX_EVENT; + fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; + fep->ev_err = SCC_ERR_EVENT_MSK; + + return 0; +} + 
+static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), 8); + if (IS_ERR_VALUE(fep->ring_mem_addr)) + return -ENOMEM; + + fep->ring_base = (void __iomem __force*) + cpm_dpram_addr(fep->ring_mem_addr); + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (fep->ring_base) + cpm_dpfree(fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_psmr, SCC_PSMR_PRO); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_enet_t __iomem *ep = fep->scc.ep; + + W16(ep, sen_gaddr1, 0); + W16(ep, sen_gaddr2, 0); + W16(ep, sen_gaddr3, 0); + W16(ep, sen_gaddr4, 0); +} + +static void set_multicast_one(struct net_device *dev, const u8 * mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_enet_t __iomem *ep = fep->scc.ep; + u16 taddrh, taddrm, taddrl; + + taddrh = ((u16) mac[5] << 8) | mac[4]; + taddrm = ((u16) mac[3] << 8) | mac[2]; + taddrl = ((u16) mac[1] << 8) | mac[0]; + + W16(ep, sen_taddrh, taddrh); + W16(ep, sen_taddrm, taddrm); + W16(ep, sen_taddrl, taddrl); + scc_cr_cmd(fep, CPM_CR_SET_GADDR); +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; + + /* clear promiscuous always */ + C16(sccp, scc_psmr, SCC_PSMR_PRO); + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) { + + W16(ep, sen_gaddr1, 0xffff); + W16(ep, sen_gaddr2, 0xffff); + W16(ep, sen_gaddr3, 0xffff); + W16(ep, sen_gaddr4, 0xffff); + } +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +/* + * This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; + const struct fs_platform_info *fpi = fep->fpi; + u16 paddrh, paddrm, paddrl; + const unsigned char *mac; + int i; + + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + /* clear everything (slow & steady does it) */ + for (i = 0; i < sizeof(*ep); i++) + __fs_out8((u8 __iomem *)ep + i, 0); + + /* point to bds */ + W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); + W16(ep, sen_genscc.scc_tbase, + fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); + + /* Initialize function code registers for big-endian. + */ +#ifndef CONFIG_NOT_COHERENT_CACHE + W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); + W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); +#else + W8(ep, sen_genscc.scc_rfcr, SCC_EB); + W8(ep, sen_genscc.scc_tfcr, SCC_EB); +#endif + + /* Set maximum bytes per receive buffer. 
+ * This appears to be an Ethernet frame size, not the buffer + * fragment size. It must be a multiple of four. + */ + W16(ep, sen_genscc.scc_mrblr, 0x5f0); + + /* Set CRC preset and mask. + */ + W32(ep, sen_cpres, 0xffffffff); + W32(ep, sen_cmask, 0xdebb20e3); + + W32(ep, sen_crcec, 0); /* CRC Error counter */ + W32(ep, sen_alec, 0); /* alignment error counter */ + W32(ep, sen_disfc, 0); /* discard frame counter */ + + W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */ + W16(ep, sen_retlim, 15); /* Retry limit threshold */ + + W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */ + + W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ + + W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */ + W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */ + + /* Clear hash tables. + */ + W16(ep, sen_gaddr1, 0); + W16(ep, sen_gaddr2, 0); + W16(ep, sen_gaddr3, 0); + W16(ep, sen_gaddr4, 0); + W16(ep, sen_iaddr1, 0); + W16(ep, sen_iaddr2, 0); + W16(ep, sen_iaddr3, 0); + W16(ep, sen_iaddr4, 0); + + /* set address + */ + mac = dev->dev_addr; + paddrh = ((u16) mac[5] << 8) | mac[4]; + paddrm = ((u16) mac[3] << 8) | mac[2]; + paddrl = ((u16) mac[1] << 8) | mac[0]; + + W16(ep, sen_paddrh, paddrh); + W16(ep, sen_paddrm, paddrm); + W16(ep, sen_paddrl, paddrl); + + W16(ep, sen_pper, 0); + W16(ep, sen_taddrl, 0); + W16(ep, sen_taddrm, 0); + W16(ep, sen_taddrh, 0); + + fs_init_bds(dev); + + scc_cr_cmd(fep, CPM_CR_INIT_TRX); + + W16(sccp, scc_scce, 0xffff); + + /* Enable interrupts we wish to service. + */ + W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); + + /* Set GSMR_H to enable all normal operating modes. + * Set GSMR_L to enable Ethernet to MC68160. + */ + W32(sccp, scc_gsmrh, 0); + W32(sccp, scc_gsmrl, + SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | + SCC_GSMRL_MODE_ENET); + + /* Set sync/delimiters. + */ + W16(sccp, scc_dsr, 0xd555); + + /* Set processing mode. Use Ethernet CRC, catch broadcast, and + * start frame search 22 bit times after RENA. 
+ */ + W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); + + /* Set full duplex mode if needed */ + if (fep->phydev->duplex) + S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); + + S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + int i; + + for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) + udelay(1); + + if (i == SCC_RESET_DELAY) + dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n"); + + W16(sccp, scc_sccm, 0); + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + fs_cleanup_bds(dev); +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + /* nothing */ +} + +static void tx_kickstart(struct net_device *dev) +{ + /* nothing */ +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + return (u32) R16(sccp, scc_scce); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, int_events & 0xffff); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *)) + return -EINVAL; + + memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); + p = (char *)p + sizeof(scc_t); + + memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); + + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(scc_t) + sizeof(scc_enet_t __iomem *); +} + +static void tx_restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + scc_cr_cmd(fep, CPM_CR_RESTART_TX); +} + + + +/*************************************************************************/ + +const struct fs_ops fs_scc_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + .stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = allocate_bd, + .free_bd = free_bd, +}; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c new file mode 100644 index 000000000000..b09270b5d0a5 --- /dev/null +++ 
b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -0,0 +1,246 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/platform_device.h> +#include <linux/mdio-bitbang.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> + +#include "fs_enet.h" + +struct bb_info { + struct mdiobb_ctrl ctrl; + __be32 __iomem *dir; + __be32 __iomem *dat; + u32 mdio_msk; + u32 mdc_msk; +}; + +/* FIXME: If any other users of GPIO crop up, then these will have to + * have some sort of global synchronization to avoid races with other + * pins on the same port. The ideal solution would probably be to + * bind the ports to a GPIO driver, and have this be a client of it. + */ +static inline void bb_set(u32 __iomem *p, u32 m) +{ + out_be32(p, in_be32(p) | m); +} + +static inline void bb_clr(u32 __iomem *p, u32 m) +{ + out_be32(p, in_be32(p) & ~m); +} + +static inline int bb_read(u32 __iomem *p, u32 m) +{ + return (in_be32(p) & m) != 0; +} + +static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (dir) + bb_set(bitbang->dir, bitbang->mdio_msk); + else + bb_clr(bitbang->dir, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dir); +} + +static inline int mdio_read(struct mdiobb_ctrl *ctrl) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + return bb_read(bitbang->dat, bitbang->mdio_msk); +} + +static inline void mdio(struct mdiobb_ctrl *ctrl, int what) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (what) + bb_set(bitbang->dat, bitbang->mdio_msk); + else + bb_clr(bitbang->dat, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dat); +} + +static inline void mdc(struct mdiobb_ctrl *ctrl, int what) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (what) + bb_set(bitbang->dat, bitbang->mdc_msk); + else + bb_clr(bitbang->dat, bitbang->mdc_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dat); +} + +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = mdc, + .set_mdio_dir = mdio_dir, + .set_mdio_data = mdio, + .get_mdio_data = mdio_read, +}; + +static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, + struct device_node *np) +{ + struct resource res; + const u32 *data; + int mdio_pin, mdc_pin, len; + struct bb_info *bitbang = bus->priv; + + int ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + if (resource_size(&res) <= 13) + return -ENODEV; + + /* This should really encode the pin number as well, but all + * we get is an int, and the odds of multiple bitbang mdio buses + * is low enough that it's not worth going too crazy. 
+ */ + snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + + data = of_get_property(np, "fsl,mdio-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdio_pin = *data; + + data = of_get_property(np, "fsl,mdc-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdc_pin = *data; + + bitbang->dir = ioremap(res.start, resource_size(&res)); + if (!bitbang->dir) + return -ENOMEM; + + bitbang->dat = bitbang->dir + 4; + bitbang->mdio_msk = 1 << (31 - mdio_pin); + bitbang->mdc_msk = 1 << (31 - mdc_pin); + + return 0; +} + +static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) +{ + struct mii_bus *new_bus; + struct bb_info *bitbang; + int ret = -ENOMEM; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + goto out; + + bitbang->ctrl.ops = &bb_ops; + + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!new_bus) + goto out_free_priv; + + new_bus->name = "CPM2 Bitbanged MII", + + ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_bus; + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + new_bus->parent = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(bitbang->dir); +out_free_bus: + free_mdio_bitbang(new_bus); +out_free_priv: + kfree(bitbang); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct platform_device *ofdev) +{ + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct bb_info *bitbang = bus->priv; + + mdiobus_unregister(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + free_mdio_bitbang(bus); + iounmap(bitbang->dir); + kfree(bitbang); + + return 0; +} + +static struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); + +static struct platform_driver fs_enet_bb_mdio_driver = { + .driver = { + .name = "fsl-bb-mdio", + .owner = THIS_MODULE, + .of_match_table = fs_enet_mdio_bb_match, + }, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +static int fs_enet_mdio_bb_init(void) +{ + return platform_driver_register(&fs_enet_bb_mdio_driver); +} + +static void fs_enet_mdio_bb_exit(void) +{ + platform_driver_unregister(&fs_enet_bb_mdio_driver); +} + +module_init(fs_enet_mdio_bb_init); +module_exit(fs_enet_mdio_bb_exit); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c new file mode 100644 index 000000000000..e0e9d6c35d83 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -0,0 +1,251 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> + +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/uaccess.h> +#include <asm/mpc5xxx.h> + +#include "fs_enet.h" +#include "fec.h" + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define FEC_MII_LOOPS 10000 + +static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) +{ + struct fec_info* fec = bus->priv; + struct fec __iomem *fecp = fec->fecp; + int i, ret = -1; + + BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + ret = in_be32(&fecp->fec_mii_data) & 0xffff; + } + + return ret; +} + +static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + struct fec_info* fec = bus->priv; + struct fec __iomem *fecp = fec->fecp; + int i; + + /* this must never happen */ + BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); + + /* Add PHY address to register command. 
*/ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + + return 0; + +} + +static int fs_enet_fec_mii_reset(struct mii_bus *bus) +{ + /* nothing here - for now */ + return 0; +} + +static struct of_device_id fs_enet_mdio_fec_match[]; +static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) +{ + const struct of_device_id *match; + struct resource res; + struct mii_bus *new_bus; + struct fec_info *fec; + int (*get_bus_freq)(struct device_node *); + int ret = -ENOMEM, clock, speed; + + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); + if (!match) + return -EINVAL; + get_bus_freq = match->data; + + new_bus = mdiobus_alloc(); + if (!new_bus) + goto out; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + if (!fec) + goto out_mii; + + new_bus->priv = fec; + new_bus->name = "FEC MII Bus"; + new_bus->read = &fs_enet_fec_mii_read; + new_bus->write = &fs_enet_fec_mii_write; + new_bus->reset = &fs_enet_fec_mii_reset; + + ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); + if (ret) + goto out_res; + + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); + + fec->fecp = ioremap(res.start, resource_size(&res)); + if (!fec->fecp) + goto out_fec; + + if (get_bus_freq) { + clock = get_bus_freq(ofdev->dev.of_node); + if (!clock) { + /* Use maximum divider if clock is unknown */ + dev_warn(&ofdev->dev, "could not determine IPS clock\n"); + clock = 0x3F * 5000000; + } + } else + clock = ppc_proc_freq; + + /* + * Scale for a MII clock <= 2.5 MHz + * Note that only 6 bits (25:30) are available for MII speed. + */ + speed = (clock + 4999999) / 5000000; + if (speed > 0x3F) { + speed = 0x3F; + dev_err(&ofdev->dev, + "MII clock (%d Hz) exceeds max (2.5 MHz)\n", + clock / speed); + } + + fec->mii_speed = speed << 1; + + setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); + setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | + FEC_ECNTRL_ETHER_EN); + out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII); + clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + new_bus->parent = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(fec->fecp); +out_res: +out_fec: + kfree(fec); +out_mii: + mdiobus_free(new_bus); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct platform_device *ofdev) +{ + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct fec_info *fec = bus->priv; + + mdiobus_unregister(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + iounmap(fec->fecp); + kfree(fec); + mdiobus_free(bus); + + return 0; +} + +static struct of_device_id fs_enet_mdio_fec_match[] = { + { + .compatible = "fsl,pq1-fec-mdio", + }, +#if defined(CONFIG_PPC_MPC512x) + { + .compatible = "fsl,mpc5121-fec-mdio", + .data = mpc5xxx_get_bus_frequency, + }, +#endif + {}, +}; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); + +static struct platform_driver fs_enet_fec_mdio_driver = { + .driver = { + .name = "fsl-fec-mdio", + .owner = THIS_MODULE, + .of_match_table = fs_enet_mdio_fec_match, + }, + .probe 
= fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +static int fs_enet_mdio_fec_init(void) +{ + return platform_driver_register(&fs_enet_fec_mdio_driver); +} + +static void fs_enet_mdio_fec_exit(void) +{ + platform_driver_unregister(&fs_enet_fec_mdio_driver); +} + +module_init(fs_enet_mdio_fec_init); +module_exit(fs_enet_mdio_fec_exit);
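The hash placement done by set_multicast_one() in mac-fec.c above is easier to follow in isolation: a bit-serial CRC over the 6-byte MAC, of which bits [5:1] are bit-reversed into a 5-bit index while bit 0 selects the high or low 32-bit group hash register. The standalone sketch below is not part of the driver; it only mirrors that arithmetic, reusing the 0x04C11DB7 polynomial from fec.h, and the sample address is an arbitrary multicast MAC chosen for illustration.

#include <stdint.h>
#include <stdio.h>

#define FEC_CRC_POLY 0x04C11DB7	/* same polynomial as fec.h */

/* Mirror of the CRC and hash shuffle in set_multicast_one() (mac-fec.c). */
static unsigned int fec_hash_index(const uint8_t mac[6], int *use_hthi)
{
	uint32_t crc = 0xffffffff;
	unsigned int temp, hash_index;
	int i, j;

	for (i = 0; i < 6; i++) {
		uint8_t byte = mac[i];
		for (j = 0; j < 8; j++) {
			uint32_t msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (byte & 0x1))
				crc ^= FEC_CRC_POLY;
			byte >>= 1;
		}
	}

	/* Keep CRC bits [5:1], bit-reverse them into the 5-bit index;
	 * CRC bit 0 picks grp_hash_table_high vs. grp_hash_table_low. */
	temp = (crc & 0x3f) >> 1;
	hash_index = ((temp & 0x01) << 4) |
		     ((temp & 0x02) << 2) |
		      (temp & 0x04) |
		     ((temp & 0x08) >> 2) |
		     ((temp & 0x10) >> 4);
	*use_hthi = crc & 1;
	return hash_index;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int hi;
	unsigned int idx = fec_hash_index(mac, &hi);

	printf("set bit %u of %s\n", idx,
	       hi ? "grp_hash_table_high" : "grp_hash_table_low");
	return 0;
}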
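The MII clock setup in fs_enet_mdio_probe() (mii-fec.c above) reduces to one rounded-up division: derive the smallest divider that, per the driver's comment, keeps the MII clock at or below 2.5 MHz, clamp it to the 6-bit field, and program it shifted left by one into fec_mii_speed. A worked example of just that arithmetic, assuming a purely hypothetical 66 MHz bus clock:

#include <stdio.h>

/* Sketch of the divider arithmetic in fs_enet_mdio_probe(); the 66 MHz
 * bus clock is an assumed example value, not taken from any board. */
int main(void)
{
	int clock = 66000000;				/* assumed bus clock, in Hz */
	int speed = (clock + 4999999) / 5000000;	/* round up toward <= 2.5 MHz MII clock */

	if (speed > 0x3F)				/* only 6 bits are available */
		speed = 0x3F;

	printf("divider=%d fec_mii_speed=0x%02x\n", speed, speed << 1);
	/* Prints: divider=14 fec_mii_speed=0x1c */
	return 0;
}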