author | Yevgeny Petrilin <yevgenyp@mellanox.co.il> | 2009-06-02 00:27:13 +0400
committer | David S. Miller <davem@davemloft.net> | 2009-06-02 13:29:03 +0400
commit | 453a608277355735190e05c43f909808e0f73641 (patch)
tree | 745af467980a75013642915f458a1a8af3035211 /drivers/net/mlx4/en_rx.c
parent | f771bef98004d9d141b085d987a77d06669d4f4f (diff)
download | linux-453a608277355735190e05c43f909808e0f73641.tar.xz
mlx4_en: Giving interface name in debug messages
Each debug message now shows the interface name when the net device has been
registered, or the PCI bus ID with the port number when it has not been
registered yet. Messages that are not port/netdev specific keep the old format.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
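
The en_err/en_warn/en_dbg helpers used below are not defined in this file; they come from drivers/net/mlx4/mlx4_en.h in the same patch. As a rough, illustrative sketch only (the in-tree definition may differ in its exact format strings and struct fields), a printk wrapper with the behaviour described in the commit message could look like this:

```c
/*
 * Illustrative sketch only -- not the in-tree definition.
 * Assumes mlx4_en_priv has the fields used here (registered, dev, mdev, port).
 * Once the net device is registered, prefix messages with its name;
 * before that, fall back to the PCI bus ID plus the port number.
 */
#define en_print(level, priv, format, arg...)				\
do {									\
	if ((priv)->registered)						\
		printk(level "%s: %s: " format, DRV_NAME,		\
		       (priv)->dev->name, ## arg);			\
	else								\
		printk(level "%s: %s: Port %d: " format, DRV_NAME,	\
		       dev_name(&(priv)->mdev->pdev->dev),		\
		       (priv)->port, ## arg);				\
} while (0)

#define en_err(priv, format, arg...)  en_print(KERN_ERR, priv, format, ## arg)
#define en_warn(priv, format, arg...) en_print(KERN_WARNING, priv, format, ## arg)
```

Messages that are not tied to a specific port or netdev keep using the existing mlx4_err/mlx4_warn/mlx4_dbg macros, which is why some call sites are left untouched by the diff below.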
Diffstat (limited to 'drivers/net/mlx4/en_rx.c')
-rw-r--r-- | drivers/net/mlx4/en_rx.c | 78
1 file changed, 37 insertions(+), 41 deletions(-)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6bfab6e5ba1d..5a14899c1e25 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 			goto out;
 
 		page_alloc->offset = priv->frag_info[i].frag_align;
-		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
-			 i, page_alloc->page);
+		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+		       i, page_alloc->page);
 	}
 	return 0;
 
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		page_alloc = &ring->page_alloc[i];
-		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
-			 i, page_count(page_alloc->page));
+		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+		       i, page_count(page_alloc->page));
 
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
@@ -214,10 +214,10 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 	skb_frags = ring->rx_info + (index << priv->log_rx_info);
 	for (nr = 0; nr < priv->num_frags; nr++) {
-		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
-		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
 		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
@@ -226,7 +226,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
@@ -239,14 +238,14 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					mlx4_err(mdev, "Failed to allocate "
-						       "enough rx buffers\n");
+					en_err(priv, "Failed to allocate "
+						     "enough rx buffers\n");
 					return -ENOMEM;
 				} else {
 					new_size = rounddown_pow_of_two(ring->actual_size);
-					mlx4_warn(mdev, "Only %d buffers allocated "
-						        "reducing ring size to %d",
-						  ring->actual_size, new_size);
+					en_warn(priv, "Only %d buffers allocated "
+						      "reducing ring size to %d",
+						ring->actual_size, new_size);
 					goto reduce_rings;
 				}
 			}
@@ -282,8 +281,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 					  ring->size_mask);
 		if (err) {
 			if (netif_msg_rx_err(priv))
-				mlx4_warn(priv->mdev,
-					  "Failed preparing rx descriptor\n");
+				en_warn(priv, "Failed preparing rx descriptor\n");
 			priv->port_stats.rx_alloc_failed++;
 			break;
 		}
@@ -301,14 +299,14 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 {
 	int index;
 
-	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
-			ring->cons, ring->prod);
+	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+	       ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
 	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
@@ -373,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					sizeof(struct skb_frag_struct));
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
-		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		en_err(priv, "Failed allocating rx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -386,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map RX buffer\n");
+		en_err(priv, "Failed to map RX buffer\n");
 		goto err_hwq;
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
@@ -404,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 				    sizeof(struct net_lro_desc),
 				    GFP_KERNEL);
 	if (!ring->lro.lro_arr) {
-		mlx4_err(mdev, "Failed to allocate lro array\n");
+		en_err(priv, "Failed to allocate lro array\n");
 		goto err_map;
 	}
 	ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -455,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		/* Initialize page allocators */
 		err = mlx4_en_init_allocator(priv, ring);
 		if (err) {
-			mlx4_err(mdev, "Failed initializing ring allocator\n");
+			en_err(priv, "Failed initializing ring allocator\n");
 			ring_ind--;
 			goto err_allocator;
 		}
@@ -486,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
 				     ring->wqres.db.dma, &ring->srq);
 		if (err){
-			mlx4_err(mdev, "Failed to allocate srq\n");
+			en_err(priv, "Failed to allocate srq\n");
 			ring_ind--;
 			goto err_srq;
 		}
@@ -601,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
 	if (!skb) {
-		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
 	skb->dev = priv->dev;
@@ -680,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
 	struct skb_frag_struct *skb_frags;
@@ -717,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 						MLX4_CQE_OPCODE_ERROR)) {
-			mlx4_err(mdev, "CQE completed in error - vendor "
+			en_err(priv, "CQE completed in error - vendor "
 				  "syndrom:%d syndrom:%d\n",
 				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
 				  ((struct mlx4_err_cqe *) cqe)->syndrome);
 			goto next;
 		}
 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
 			goto next;
 		}
 
@@ -874,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
 	u16 res = MLX4_EN_ALLOC_SIZE % stride;
 	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
 
-	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
 			    "res:%d offset:%d\n", stride, align, res, offset);
 	return offset;
 }
@@ -919,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
 
-	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
 		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
 				"stride:%d last_offset:%d\n", i,
 				priv->frag_info[i].frag_size,
 				priv->frag_info[i].frag_prefix_size,
@@ -942,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 	int i;
 
 	rss_map->size = roundup_pow_of_two(num_entries);
-	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
-		 rss_map->size);
+	en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+	       rss_map->size);
 
 	for (i = 0; i < rss_map->size; i++) {
 		rss_map->map[i] = i % num_rings;
-		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+		en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
 	}
 }
 
@@ -962,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 
 	context = kmalloc(sizeof *context , GFP_KERNEL);
 	if (!context) {
-		mlx4_err(mdev, "Failed to allocate qp context\n");
+		en_err(priv, "Failed to allocate qp context\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
 	}
 	qp->event = mlx4_en_sqp_event;
@@ -1000,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
 				    rss_map->size, &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", rss_map->size);
 		return err;
 	}
 
@@ -1025,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	/* Configure RSS indirection qp */
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
-			       "indirection qp\n");
+		en_err(priv, "Failed to reserve range for RSS "
+			     "indirection qp\n");
 		goto rss_err;
 	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto reserve_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;