Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa/dpaa_eth.c')
-rw-r--r-- | drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 370
1 file changed, 190 insertions, 180 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index b4b82b9c5cd6..6a9d12dad5d9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -178,31 +178,9 @@ struct fm_port_fqs {
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-        size_t res = DPAA_BP_RAW_SIZE / 4;
-        u8 i;
-
-        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-                res *= 2;
-        return res;
-}
 
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
 
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
         /* Allow the Fman (Tx) port to process in-flight frames before we
          * try switching it off.
          */
-        usleep_range(5000, 10000);
+        msleep(200);
 
         err = mac_dev->stop(mac_dev);
         if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
                 phy_disconnect(net_dev->phydev);
         net_dev->phydev = NULL;
 
+        msleep(200);
+
         return err;
 }
 
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 
 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
-        int i;
-
-        for (i = 0; i < DPAA_BPS_NUM; i++)
-                dpaa_bp_free(priv->dpaa_bps[i]);
+        dpaa_bp_free(priv->dpaa_bp);
 }
 
 /* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
         qman_release_pool(rx_pool_channel);
 }
 
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
         u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
         const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
         for_each_cpu_and(cpu, cpus, cpu_online_mask) {
                 portal = qman_get_affine_portal(cpu);
                 qman_p_static_dequeue_add(portal, pool);
+                qman_start_using_portal(portal, dev);
         }
 }
 
@@ -901,7 +879,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 
         if (num_portals == 0)
                 dev_err(priv->net_dev->dev.parent,
-                        "No Qman software (affine) channels found");
+                        "No Qman software (affine) channels found\n");
 
         /* Initialize each FQ in the list */
         list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
         return err;
 }
 
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-                                 size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+                                 struct dpaa_fq *errq,
                                  struct dpaa_fq *defq, struct dpaa_fq *pcdq,
                                  struct dpaa_buffer_layout *buf_layout)
 {
         struct fman_buffer_prefix_content buf_prefix_content;
         struct fman_port_rx_params *rx_p;
         struct fman_port_params params;
-        int i, err;
+        int err;
 
         memset(&params, 0, sizeof(params));
         memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
         }
 
-        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-        for (i = 0; i < count; i++) {
-                rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
-                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-        }
+        rx_p->ext_buf_pools.num_of_pools_used = 1;
+        rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+        rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
 
         err = fman_port_config(port, &params);
         if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 }
 
 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-                               struct dpaa_bp **bps, size_t count,
+                               struct dpaa_bp *bp,
                                struct fm_port_fqs *port_fqs,
                                struct dpaa_buffer_layout *buf_layout,
                                struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
         if (err)
                 return err;
 
-        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+        err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
                                     port_fqs->rx_defq, port_fqs->rx_pcdq,
                                     &buf_layout[RX]);
 
@@ -1335,15 +1310,16 @@ static void dpaa_fd_release(const struct net_device *net_dev,
                 vaddr = phys_to_virt(qm_fd_addr(fd));
                 sgt = vaddr + qm_fd_get_offset(fd);
 
-                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
-                                 DMA_FROM_DEVICE);
+                dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
                 dpaa_release_sgt_members(sgt);
 
-                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
-                                      DMA_FROM_DEVICE);
-                if (dma_mapping_error(dpaa_bp->dev, addr)) {
-                        dev_err(dpaa_bp->dev, "DMA mapping failed");
+                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+                                    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+                                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+                        netdev_err(net_dev, "DMA mapping failed\n");
                         return;
                 }
                 bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                                struct sk_buff *skb,
                                struct qm_fd *fd,
-                               char *parse_results)
+                               void *parse_results)
 {
         struct fman_prs_result *parse_result;
         u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1464,24 @@ return_error:
 
 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 {
-        struct device *dev = dpaa_bp->dev;
+        struct net_device *net_dev = dpaa_bp->priv->net_dev;
         struct bm_buffer bmb[8];
         dma_addr_t addr;
-        void *new_buf;
+        struct page *p;
         u8 i;
 
         for (i = 0; i < 8; i++) {
-                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-                if (unlikely(!new_buf)) {
-                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
-                                dpaa_bp->raw_size);
+                p = dev_alloc_pages(0);
+                if (unlikely(!p)) {
+                        netdev_err(net_dev, "dev_alloc_pages() failed\n");
                         goto release_previous_buffs;
                 }
 
-                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
-                addr = dma_map_single(dev, new_buf,
-                                      dpaa_bp->size, DMA_FROM_DEVICE);
-                if (unlikely(dma_mapping_error(dev, addr))) {
-                        dev_err(dpaa_bp->dev, "DMA map failed");
+                addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+                                    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+                if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+                                               addr))) {
+                        netdev_err(net_dev, "DMA map failed\n");
                         goto release_previous_buffs;
                 }
 
@@ -1581,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 {
         struct dpaa_bp *dpaa_bp;
         int *countptr;
-        int res, i;
+        int res;
+
+        dpaa_bp = priv->dpaa_bp;
+        if (!dpaa_bp)
+                return -EINVAL;
+        countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+        res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+        if (res)
+                return res;
 
-        for (i = 0; i < DPAA_BPS_NUM; i++) {
-                dpaa_bp = priv->dpaa_bps[i];
-                if (!dpaa_bp)
-                        return -EINVAL;
-                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-                if (res)
-                        return res;
-        }
         return 0;
 }
 
@@ -1600,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
  * Skb freeing is not handled here.
  *
  * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
  *
  * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
  */
 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-                                          const struct qm_fd *fd)
+                                          const struct qm_fd *fd, bool ts)
 {
         const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
         struct device *dev = priv->net_dev->dev.parent;
         struct skb_shared_hwtstamps shhwtstamps;
         dma_addr_t addr = qm_fd_addr(fd);
+        void *vaddr = phys_to_virt(addr);
         const struct qm_sg_entry *sgt;
-        struct sk_buff **skbh, *skb;
-        int nr_frags, i;
+        struct sk_buff *skb;
         u64 ns;
-
-        skbh = (struct sk_buff **)phys_to_virt(addr);
-        skb = *skbh;
-
-        if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
-                if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-                                          &ns)) {
-                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
-                        skb_tstamp_tx(skb, &shhwtstamps);
-                } else {
-                        dev_warn(dev, "fman_port_get_tstamp failed!\n");
-                }
-        }
+        int i;
 
         if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-                nr_frags = skb_shinfo(skb)->nr_frags;
-                dma_unmap_single(dev, addr,
-                                 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-                                 dma_dir);
+                dma_unmap_page(priv->tx_dma_dev, addr,
+                               qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+                               dma_dir);
 
                 /* The sgt buffer has been allocated with netdev_alloc_frag(),
                  * it's from lowmem.
                  */
-                sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+                sgt = vaddr + qm_fd_get_offset(fd);
 
                 /* sgt[0] is from lowmem, was dma_map_single()-ed */
-                dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+                dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
                                  qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
                 /* remaining pages were mapped with skb_frag_dma_map() */
-                for (i = 1; i <= nr_frags; i++) {
+                for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+                     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
                         WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
-                        dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+                        dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
                                        qm_sg_entry_get_len(&sgt[i]), dma_dir);
                 }
-
-                /* Free the page frag that we allocated on Tx */
-                skb_free_frag(phys_to_virt(addr));
         } else {
-                dma_unmap_single(dev, addr,
-                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+                dma_unmap_single(priv->tx_dma_dev, addr,
+                                 priv->tx_headroom + qm_fd_get_length(fd),
+                                 dma_dir);
+        }
+
+        skb = *(struct sk_buff **)vaddr;
+
+        /* DMA unmapping is required before accessing the HW provided info */
+        if (ts && priv->tx_tstamp &&
+            skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+                if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+                                          &ns)) {
+                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
+                        skb_tstamp_tx(skb, &shhwtstamps);
+                } else {
+                        dev_warn(dev, "fman_port_get_tstamp failed!\n");
+                }
         }
 
+        if (qm_fd_get_format(fd) == qm_fd_sg)
+                /* Free the page that we allocated on Tx for the SGT */
+                free_pages((unsigned long)vaddr, 0);
+
         return skb;
 }
 
@@ -1715,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
         return skb;
 
 free_buffer:
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
         return NULL;
 }
 
@@ -1762,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                         goto free_buffers;
 
                 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-                dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
-                                 DMA_FROM_DEVICE);
+                dma_unmap_page(priv->rx_dma_dev, sg_addr,
+                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
                 if (!skb) {
                         sz = dpaa_bp->size +
                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1815,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
         WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
         /* free the SG table buffer */
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
 
         return skb;
 
@@ -1832,7 +1812,7 @@ free_buffers:
         for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
                 sg_addr = qm_sg_addr(&sgt[i]);
                 sg_vaddr = phys_to_virt(sg_addr);
-                skb_free_frag(sg_vaddr);
+                free_pages((unsigned long)sg_vaddr, 0);
                 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                 if (dpaa_bp) {
                         count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1843,7 +1823,7 @@ free_buffers:
                         break;
         }
         /* free the SGT fragment */
-        skb_free_frag(vaddr);
+        free_pages((unsigned long)vaddr, 0);
 
         return NULL;
 }
@@ -1853,9 +1833,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
                             int *offset)
 {
         struct net_device *net_dev = priv->net_dev;
-        struct device *dev = net_dev->dev.parent;
         enum dma_data_direction dma_dir;
-        unsigned char *buffer_start;
+        unsigned char *buff_start;
         struct sk_buff **skbh;
         dma_addr_t addr;
         int err;
@@ -1864,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
          * available, so just use that for offset.
          */
         fd->bpid = FSL_DPAA_BPID_INV;
-        buffer_start = skb->data - priv->tx_headroom;
+        buff_start = skb->data - priv->tx_headroom;
         dma_dir = DMA_TO_DEVICE;
 
-        skbh = (struct sk_buff **)buffer_start;
+        skbh = (struct sk_buff **)buff_start;
         *skbh = skb;
 
@@ -1876,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
          * need to write into the skb.
          */
         err = dpaa_enable_tx_csum(priv, skb, fd,
-                                  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
         if (unlikely(err < 0)) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1889,9 +1868,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
         fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
         /* Map the entire buffer size that may be seen by FMan, but no more */
-        addr = dma_map_single(dev, skbh,
-                              skb_tail_pointer(skb) - buffer_start, dma_dir);
-        if (unlikely(dma_mapping_error(dev, addr))) {
+        addr = dma_map_single(priv->tx_dma_dev, buff_start,
+                              priv->tx_headroom + skb->len, dma_dir);
+        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
                 return -EINVAL;
@@ -1907,24 +1886,22 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
         const int nr_frags = skb_shinfo(skb)->nr_frags;
         struct net_device *net_dev = priv->net_dev;
-        struct device *dev = net_dev->dev.parent;
         struct qm_sg_entry *sgt;
         struct sk_buff **skbh;
-        int i, j, err, sz;
-        void *buffer_start;
+        void *buff_start;
         skb_frag_t *frag;
         dma_addr_t addr;
         size_t frag_len;
-        void *sgt_buf;
-
-        /* get a page frag to store the SGTable */
-        sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-        sgt_buf = netdev_alloc_frag(sz);
-        if (unlikely(!sgt_buf)) {
-                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-                           sz);
+        struct page *p;
+        int i, j, err;
+
+        /* get a page to store the SGTable */
+        p = dev_alloc_pages(0);
+        if (unlikely(!p)) {
+                netdev_err(net_dev, "dev_alloc_pages() failed\n");
                 return -ENOMEM;
         }
+        buff_start = page_address(p);
 
         /* Enable L3/L4 hardware checksum computation.
          *
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
          * need to write into the skb.
          */
         err = dpaa_enable_tx_csum(priv, skb, fd,
-                                  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+                                  buff_start + DPAA_TX_PRIV_DATA_SIZE);
         if (unlikely(err < 0)) {
                 if (net_ratelimit())
                         netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,15 +1918,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         }
 
         /* SGT[0] is used by the linear part */
-        sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+        sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
         frag_len = skb_headlen(skb);
         qm_sg_entry_set_len(&sgt[0], frag_len);
         sgt[0].bpid = FSL_DPAA_BPID_INV;
         sgt[0].offset = 0;
-        addr = dma_map_single(dev, skb->data,
+        addr = dma_map_single(priv->tx_dma_dev, skb->data,
                               skb_headlen(skb), dma_dir);
-        if (unlikely(dma_mapping_error(dev, addr))) {
-                dev_err(dev, "DMA mapping failed");
+        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+                netdev_err(priv->net_dev, "DMA mapping failed\n");
                 err = -EINVAL;
                 goto sg0_map_failed;
         }
@@ -1960,10 +1937,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
                 frag = &skb_shinfo(skb)->frags[i];
                 frag_len = skb_frag_size(frag);
                 WARN_ON(!skb_frag_page(frag));
-                addr = skb_frag_dma_map(dev, frag, 0,
+                addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
                                         frag_len, dma_dir);
-                if (unlikely(dma_mapping_error(dev, addr))) {
-                        dev_err(dev, "DMA mapping failed");
+                if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+                        netdev_err(priv->net_dev, "DMA mapping failed\n");
                         err = -EINVAL;
                         goto sg_map_failed;
                 }
@@ -1979,17 +1956,17 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         /* Set the final bit in the last used entry of the SGT */
         qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
 
+        /* set fd offset to priv->tx_headroom */
         qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
 
         /* DMA map the SGT page */
-        buffer_start = (void *)sgt - priv->tx_headroom;
-        skbh = (struct sk_buff **)buffer_start;
+        skbh = (struct sk_buff **)buff_start;
         *skbh = skb;
 
-        addr = dma_map_single(dev, buffer_start,
-                              priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
-        if (unlikely(dma_mapping_error(dev, addr))) {
-                dev_err(dev, "DMA mapping failed");
+        addr = dma_map_page(priv->tx_dma_dev, p, 0,
+                            priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+        if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+                netdev_err(priv->net_dev, "DMA mapping failed\n");
                 err = -EINVAL;
                 goto sgt_map_failed;
         }
@@ -2003,11 +1980,11 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 sgt_map_failed:
 sg_map_failed:
         for (j = 0; j < i; j++)
-                dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+                dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
                                qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-        skb_free_frag(sgt_buf);
+        free_pages((unsigned long)buff_start, 0);
 
         return err;
 }
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
         if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
                 return NETDEV_TX_OK;
 
-        dpaa_cleanup_tx_fd(priv, &fd);
+        dpaa_cleanup_tx_fd(priv, &fd, false);
 skb_to_fd_failed:
 enomem:
         percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
 
         percpu_priv->stats.tx_errors++;
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, false);
         dev_kfree_skb(skb);
 }
 
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 
         percpu_priv->tx_confirm++;
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, true);
         consume_skb(skb);
 }
 
@@ -2304,11 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
                 return qman_cb_dqrr_consume;
         }
 
-        dpaa_bp = dpaa_bpid2pool(fd->bpid);
-        if (!dpaa_bp)
-                return qman_cb_dqrr_consume;
-
-        dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+        dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                       DMA_FROM_DEVICE);
 
         /* prefetch the first 64 bytes of the frame or the SGT start */
         vaddr = phys_to_virt(addr);
@@ -2430,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
         percpu_priv->stats.tx_fifo_errors++;
         count_ern(percpu_priv, msg);
 
-        skb = dpaa_cleanup_tx_fd(priv, fd);
+        skb = dpaa_cleanup_tx_fd(priv, fd, false);
         dev_kfree_skb_any(skb);
 }
 
@@ -2663,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
         dma_addr_t addr = bm_buf_addr(bmb);
 
-        dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+        dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+                       DMA_FROM_DEVICE);
 
         skb_free_frag(phys_to_virt(addr));
 }
@@ -2764,21 +2739,46 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 
 static int dpaa_eth_probe(struct platform_device *pdev)
 {
-        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
         struct net_device *net_dev = NULL;
+        struct dpaa_bp *dpaa_bp = NULL;
         struct dpaa_fq *dpaa_fq, *tmp;
         struct dpaa_priv *priv = NULL;
         struct fm_port_fqs port_fqs;
         struct mac_device *mac_dev;
-        int err = 0, i, channel;
+        int err = 0, channel;
         struct device *dev;
 
-        /* device used for DMA mapping */
-        dev = pdev->dev.parent;
-        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
-        if (err) {
-                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
-                return err;
+        dev = &pdev->dev;
+
+        err = bman_is_probed();
+        if (!err)
+                return -EPROBE_DEFER;
+        if (err < 0) {
+                dev_err(dev, "failing probe due to bman probe error\n");
+                return -ENODEV;
+        }
+        err = qman_is_probed();
+        if (!err)
+                return -EPROBE_DEFER;
+        if (err < 0) {
+                dev_err(dev, "failing probe due to qman probe error\n");
+                return -ENODEV;
+        }
+        err = bman_portals_probed();
+        if (!err)
+                return -EPROBE_DEFER;
+        if (err < 0) {
+                dev_err(dev,
+                        "failing probe due to bman portals probe error\n");
+                return -ENODEV;
+        }
+        err = qman_portals_probed();
+        if (!err)
+                return -EPROBE_DEFER;
+        if (err < 0) {
+                dev_err(dev,
+                        "failing probe due to qman portals probe error\n");
+                return -ENODEV;
         }
 
         /* Allocate this early, so we can store relevant information in
@@ -2801,11 +2801,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
         mac_dev = dpaa_mac_dev_get(pdev);
         if (IS_ERR(mac_dev)) {
-                dev_err(dev, "dpaa_mac_dev_get() failed\n");
+                netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
                 err = PTR_ERR(mac_dev);
                 goto free_netdev;
         }
 
+        /* Devices used for DMA mapping */
+        priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+        priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+        err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+        if (!err)
+                err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+                                                   DMA_BIT_MASK(40));
+        if (err) {
+                netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+                return err;
+        }
+
         /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
          * we choose conservatively and let the user explicitly set a higher
          * MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2822,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
         /* bp init */
-        for (i = 0; i < DPAA_BPS_NUM; i++) {
-                dpaa_bps[i] = dpaa_bp_alloc(dev);
-                if (IS_ERR(dpaa_bps[i])) {
-                        err = PTR_ERR(dpaa_bps[i]);
-                        goto free_dpaa_bps;
-                }
-                /* the raw size of the buffers used for reception */
-                dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-                /* avoid runtime computations by keeping the usable size here */
-                dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-                dpaa_bps[i]->dev = dev;
-
-                err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-                if (err < 0)
-                        goto free_dpaa_bps;
-                priv->dpaa_bps[i] = dpaa_bps[i];
+        dpaa_bp = dpaa_bp_alloc(dev);
+        if (IS_ERR(dpaa_bp)) {
+                err = PTR_ERR(dpaa_bp);
+                goto free_dpaa_bps;
         }
+        /* the raw size of the buffers used for reception */
+        dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+        /* avoid runtime computations by keeping the usable size here */
+        dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+        dpaa_bp->priv = priv;
+
+        err = dpaa_bp_alloc_pool(dpaa_bp);
+        if (err < 0)
+                goto free_dpaa_bps;
+        priv->dpaa_bp = dpaa_bp;
 
         INIT_LIST_HEAD(&priv->dpaa_fq_list);
 
@@ -2864,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         /* Walk the CPUs with affine portals
          * and add this pool channel to each's dequeue mask.
          */
-        dpaa_eth_add_channel(priv->channel);
+        dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
         dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
 
@@ -2896,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
         priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
         /* All real interfaces need their ports initialized */
-        err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+        err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
                                   &priv->buf_layout[0], dev);
         if (err)
                 goto free_dpaa_fqs;
@@ -2955,7 +2965,7 @@ static int dpaa_remove(struct platform_device *pdev)
         struct device *dev;
         int err;
 
-        dev = pdev->dev.parent;
+        dev = &pdev->dev;
         net_dev = dev_get_drvdata(dev);
 
         priv = netdev_priv(net_dev);