author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-02-01 08:16:22 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-02-01 08:16:22 +0300
commit | a1f3d4bba8ea395a39d34ade6017afee8be16031 (patch)
tree | 874d843d35622f17aa6c3e048e42cf0d679bcb75 /drivers/infiniband/hw
parent | 723d928417bffff6467da155d8ebbbe016464012 (diff)
parent | ebf53826e105f488f4f628703a108e98940d1dc5 (diff)
download | linux-a1f3d4bba8ea395a39d34ade6017afee8be16031.tar.xz
Merge commit 'v2.6.38-rc3' into next
Diffstat (limited to 'drivers/infiniband/hw')
50 files changed, 985 insertions, 376 deletions
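
Note (illustrative, not part of the commit): several hunks below replace an open-coded vmalloc()-plus-memset() pair with vzalloc(), which returns virtually contiguous, already-zeroed memory (see c2_rnic.c, ipz_pt_fn.c, and the ipath files). The sketch that follows shows the pattern in isolation; the structure and field names (struct example_dev, qptr_array, nr_entries) are placeholders invented for this sketch, not identifiers from the drivers above. A similar recurring change in this merge moves deferred work from the global workqueue (schedule_work/flush_scheduled_work) to the InfiniBand workqueue ib_wq.

    /* Minimal sketch of the vmalloc()+memset() -> vzalloc() cleanup. */
    #include <linux/vmalloc.h>
    #include <linux/errno.h>

    struct example_dev {
            void **qptr_array;          /* table of queue pointers */
            unsigned int nr_entries;    /* number of slots to allocate */
    };

    static int example_alloc_qptr_array(struct example_dev *dev)
    {
            /*
             * Old pattern: vmalloc() followed by memset(..., 0, size).
             * New pattern: vzalloc() allocates and zeroes in one call,
             * so the explicit memset() can be dropped.
             */
            dev->qptr_array = vzalloc(dev->nr_entries * sizeof(void *));
            if (!dev->qptr_array)
                    return -ENOMEM;

            return 0;
    }
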
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index 85cfae4cad71..8c81992fa6db 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -459,13 +459,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev) IB_DEVICE_MEM_WINDOW); /* Allocate the qptr_array */ - c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); + c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *)); if (!c2dev->qptr_array) { return -ENOMEM; } - /* Inialize the qptr_array */ - memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *)); + /* Initialize the qptr_array */ c2dev->qptr_array[0] = (void *) &c2dev->req_vq; c2dev->qptr_array[1] = (void *) &c2dev->rep_vq; c2dev->qptr_array[2] = (void *) &c2dev->aeq; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 09dda0b8740e..c3f5aca4ef00 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); } +#ifdef notyet int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) { struct rdma_cq_setup setup; @@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) setup.ovfl_mode = 1; return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); } +#endif static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) { diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 4bb997aa39d0..83d2e19d31ae 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -689,7 +689,7 @@ struct t3_swrq { * A T3 WQ implements both the SQ and RQ. 
*/ struct t3_wq { - union t3_wr *queue; /* DMA accessable memory */ + union t3_wr *queue; /* DMA accessible memory */ dma_addr_t dma_addr; /* DMA address for HW */ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */ u32 error; /* 1 once we go to ERROR */ diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index a237d49bdcc9..c5406da3f4cd 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h @@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); int iwch_post_zb_read(struct iwch_qp *qhp); int iwch_register_device(struct iwch_dev *dev); void iwch_unregister_device(struct iwch_dev *dev); -int iwch_quiesce_qps(struct iwch_cq *chp); -int iwch_resume_qps(struct iwch_cq *chp); void stop_read_rep_timer(struct iwch_qp *qhp); int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift); diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 0993137181d7..1b4cd09f74dc 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -1149,59 +1149,3 @@ out: PDBG("%s exit state %d\n", __func__, qhp->attr.state); return ret; } - -static int quiesce_qp(struct iwch_qp *qhp) -{ - spin_lock_irq(&qhp->lock); - iwch_quiesce_tid(qhp->ep); - qhp->flags |= QP_QUIESCED; - spin_unlock_irq(&qhp->lock); - return 0; -} - -static int resume_qp(struct iwch_qp *qhp) -{ - spin_lock_irq(&qhp->lock); - iwch_resume_tid(qhp->ep); - qhp->flags &= ~QP_QUIESCED; - spin_unlock_irq(&qhp->lock); - return 0; -} - -int iwch_quiesce_qps(struct iwch_cq *chp) -{ - int i; - struct iwch_qp *qhp; - - for (i=0; i < T3_MAX_NUM_QP; i++) { - qhp = get_qhp(chp->rhp, i); - if (!qhp) - continue; - if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) { - quiesce_qp(qhp); - continue; - } - if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp)) - quiesce_qp(qhp); - } - return 0; -} - -int iwch_resume_qps(struct iwch_cq *chp) -{ - int i; - struct iwch_qp *qhp; - - for (i=0; i < T3_MAX_NUM_QP; i++) { - qhp = get_qhp(chp->rhp, i); - if (!qhp) - continue; - if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) { - resume_qp(qhp); - continue; - } - if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp)) - resume_qp(qhp); - } - return 0; -} diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 16032cdb4337..2fe19ec9ba60 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -46,7 +46,6 @@ #include <linux/timer.h> #include <linux/io.h> #include <linux/kfifo.h> -#include <linux/mutex.h> #include <asm/byteorder.h> @@ -760,7 +759,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count); int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); u16 c4iw_rqes_posted(struct c4iw_qp *qhp); -int c4iw_post_zb_read(struct c4iw_qp *qhp); int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 057cb2505ea1..20800900ef3f 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, } } -int 
c4iw_post_zb_read(struct c4iw_qp *qhp) -{ - union t4_wr *wqe; - struct sk_buff *skb; - u8 len16; - - PDBG("%s enter\n", __func__); - skb = alloc_skb(40, GFP_KERNEL); - if (!skb) { - printk(KERN_ERR "%s cannot send zb_read!!\n", __func__); - return -ENOMEM; - } - set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - - wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read); - memset(wqe, 0, sizeof wqe->read); - wqe->read.r2 = cpu_to_be64(0); - wqe->read.stag_sink = cpu_to_be32(1); - wqe->read.to_sink_hi = cpu_to_be32(0); - wqe->read.to_sink_lo = cpu_to_be32(1); - wqe->read.stag_src = cpu_to_be32(1); - wqe->read.plen = cpu_to_be32(0); - wqe->read.to_src_hi = cpu_to_be32(0); - wqe->read.to_src_lo = cpu_to_be32(1); - len16 = DIV_ROUND_UP(sizeof wqe->read, 16); - init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16); - - return c4iw_ofld_send(&qhp->rhp->rdev, skb); -} - static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, gfp_t gfp) { @@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, wqe->cookie = (unsigned long) &ep->com.wr_wait; wqe->u.fini.type = FW_RI_TYPE_FINI; - c4iw_init_wr_wait(&ep->com.wr_wait); ret = c4iw_ofld_send(&rhp->rdev, skb); if (ret) goto out; @@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) if (qhp->attr.mpa_attr.initiator) build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); - c4iw_init_wr_wait(&qhp->ep->com.wr_wait); ret = c4iw_ofld_send(&rhp->rdev, skb); if (ret) goto out; diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index 1596e3085344..1898d6e7cce5 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -222,15 +222,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, queue->small_page = NULL; /* allocate queue page pointers */ - queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); + queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); if (!queue->queue_pages) { - queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); + queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *)); if (!queue->queue_pages) { ehca_gen_err("Couldn't allocate queue page list"); return 0; } } - memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *)); /* allocate actual queue pages */ if (is_small) { diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 765f0fc1da76..47db4bf34628 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -199,12 +199,11 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev) goto bail; } - dd = vmalloc(sizeof(*dd)); + dd = vzalloc(sizeof(*dd)); if (!dd) { dd = ERR_PTR(-ENOMEM); goto bail; } - memset(dd, 0, sizeof(*dd)); dd->ipath_unit = -1; spin_lock_irqsave(&ipath_devs_lock, flags); @@ -530,9 +529,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, for (j = 0; j < 6; j++) { if (!pdev->resource[j].start) continue; - ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n", - j, (unsigned long long)pdev->resource[j].start, - (unsigned long long)pdev->resource[j].end, + ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n", + j, &pdev->resource[j], (unsigned long long)pci_resource_len(pdev, j)); } @@ -757,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev) */ ipath_shutdown_device(dd); - flush_scheduled_work(); + flush_workqueue(ib_wq); if (dd->verbs_dev) 
ipath_unregister_ib_device(dd->verbs_dev); diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 6078992da3f0..6d4b29c4cd89 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -40,7 +40,6 @@ #include <linux/highmem.h> #include <linux/io.h> #include <linux/jiffies.h> -#include <linux/smp_lock.h> #include <asm/pgtable.h> #include "ipath_kernel.h" @@ -1531,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd, } num_subports = uinfo->spu_subport_cnt; - pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); + pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports); if (!pd->subport_uregbase) { ret = -ENOMEM; goto bail; @@ -1539,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd, /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * sizeof(u32), PAGE_SIZE) * num_subports; - pd->subport_rcvhdr_base = vmalloc(size); + pd->subport_rcvhdr_base = vzalloc(size); if (!pd->subport_rcvhdr_base) { ret = -ENOMEM; goto bail_ureg; } - pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * + pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * num_subports); if (!pd->subport_rcvegrbuf) { @@ -1557,11 +1556,6 @@ static int init_subports(struct ipath_devdata *dd, pd->port_subport_id = uinfo->spu_subport_id; pd->active_slaves = 1; set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); - memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports); - memset(pd->subport_rcvhdr_base, 0, size); - memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks * - pd->port_rcvegrbuf_size * - num_subports); goto bail; bail_rhdr: diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 8c8afc716b98..31ae1b108aea 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -277,18 +277,14 @@ static int remove_file(struct dentry *parent, char *name) goto bail; } - spin_lock(&dcache_lock); spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { - dget_locked(tmp); + dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); simple_unlink(parent->d_inode, tmp); - } else { + } else spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); - } ret = 0; bail: diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 776938299e4c..fef0f4201257 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd) struct page **pages; dma_addr_t *addrs; - pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * + pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); if (!pages) { ipath_dev_err(dd, "failed to allocate shadow page * " @@ -461,9 +461,6 @@ static void init_shadow_tids(struct ipath_devdata *dd) return; } - memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt * - sizeof(struct page *)); - dd->ipath_pageshadow = pages; dd->ipath_physshadow = addrs; } diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 5e86d73eba2a..bab9f74c0665 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page 
**p, size_t num_pages) work->mm = mm; work->num_pages = num_pages; - schedule_work(&work->work); + queue_work(ib_wq, &work->work); return; bail_mm: diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5a219a2fdf16..e8df155bc3b0 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) cq->resize_buf = NULL; cq->resize_umem = NULL; } else { + struct mlx4_ib_cq_buf tmp_buf; + int tmp_cqe = 0; + spin_lock_irq(&cq->lock); if (cq->resize_buf) { mlx4_ib_cq_resize_copy_cqes(cq); - mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); + tmp_buf = cq->buf; + tmp_cqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; @@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) cq->resize_buf = NULL; } spin_unlock_irq(&cq->lock); + + if (tmp_cqe) + mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe); } goto out; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c9a8dd63b9e2..57ffa50f509e 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); + if (IS_ERR(send_buf)) + return; /* * We rely here on the fact that MLX QPs don't use the * address handle after the send is posted (this is diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index bf3e20cd0298..c7a6213c6996 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,7 +219,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, struct net_device *ndev; enum ib_mtu tmp; - props->active_width = IB_WIDTH_4X; + props->active_width = IB_WIDTH_1X; props->active_speed = 4; props->port_cap_flags = IB_PORT_CM_SUP; props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; @@ -242,7 +242,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, tmp = iboe_get_mtu(ndev->mtu); props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; - props->state = netif_running(ndev) && netif_oper_up(ndev) ? + props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? 
IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); @@ -623,8 +623,9 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_ib_qp *mqp = to_mqp(ibqp); - err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags & - MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)); + err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, + !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), + MLX4_PROTOCOL_IB); if (err) return err; @@ -635,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) return 0; err_add: - mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw); + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); return err; } @@ -665,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct mlx4_ib_gid_entry *ge; err = mlx4_multicast_detach(mdev->dev, - &mqp->mqp, gid->raw); + &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); if (err) return err; @@ -848,8 +849,8 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) goto out; } - read_lock(&dev_base_lock); - for_each_netdev(&init_net, tmp) { + rcu_read_lock(); + for_each_netdev_rcu(&init_net, tmp) { if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); vid = rdma_vlan_dev_vlan_id(tmp); @@ -884,7 +885,7 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) } } } - read_unlock(&dev_base_lock); + rcu_read_unlock(); for (i = 0; i < 128; ++i) if (!hits[i]) { @@ -1005,7 +1006,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) goto err_pd; - ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, + PAGE_SIZE); if (!ibdev->uar_map) goto err_uar; MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 9a7794ac34c1..2001f20a4361 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1816,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; + if (be16_to_cpu(vlan) < 0x1000) { + ctrl->ins_vlan = 1 << 6; + ctrl->vlan_tag = vlan; + } + /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start @@ -1831,11 +1836,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; - if (be16_to_cpu(vlan) < 0x1000) { - ctrl->ins_vlan = 1 << 6; - ctrl->vlan_tag = vlan; - } - stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig index 03efc074967e..da314c3fec23 100644 --- a/drivers/infiniband/hw/mthca/Kconfig +++ b/drivers/infiniband/hw/mthca/Kconfig @@ -7,7 +7,7 @@ config INFINIBAND_MTHCA ("Tavor") and the MT25208 PCI Express HCA ("Arbel"). 
config INFINIBAND_MTHCA_DEBUG - bool "Verbose debugging output" if EMBEDDED + bool "Verbose debugging output" if EXPERT depends on INFINIBAND_MTHCA default y ---help--- diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index 0aa0110e4b6c..e4a08c2819e4 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c @@ -146,7 +146,7 @@ static void poll_catas(unsigned long dev_ptr) void mthca_start_catas_poll(struct mthca_dev *dev) { - unsigned long addr; + phys_addr_t addr; init_timer(&dev->catas_err.timer); dev->catas_err.map = NULL; @@ -158,7 +158,8 @@ void mthca_start_catas_poll(struct mthca_dev *dev) dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4); if (!dev->catas_err.map) { mthca_warn(dev, "couldn't map catastrophic error region " - "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4); + "at 0x%llx/0x%x\n", (unsigned long long) addr, + dev->catas_err.size * 4); return; } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index f4ceecd9684b..7bfa2a164955 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -713,7 +713,7 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) { - unsigned long addr; + phys_addr_t addr; u16 max_off = 0; int i; diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 8e8c728aff88..76785c653c13 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -653,7 +653,7 @@ static int mthca_map_reg(struct mthca_dev *dev, unsigned long offset, unsigned long size, void __iomem **map) { - unsigned long base = pci_resource_start(dev->pdev, 0); + phys_addr_t base = pci_resource_start(dev->pdev, 0); *map = ioremap(base + offset, size); if (!*map) diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 5648659ff0b0..03a59534f59e 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev, if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); + if (IS_ERR(send_buf)) + return; /* * We rely here on the fact that MLX QPs don't use the * address handle after the send is posted (this is diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 5eee6665919a..8a40cd539ab1 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -790,7 +790,7 @@ static int mthca_setup_hca(struct mthca_dev *dev) goto err_uar_table_free; } - dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!dev->kar) { mthca_err(dev, "Couldn't map kernel access region, " "aborting.\n"); diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 065b20899876..44045c8846db 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -853,7 +853,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) int mthca_init_mr_table(struct mthca_dev *dev) { - unsigned long addr; + phys_addr_t addr; int mpts, mtts, err, i; err = mthca_alloc_init(&dev->mr_table.mpt_alloc, diff --git 
a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 0c9f0aa5d4ea..3b4ec3238ceb 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -144,6 +144,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier, struct nes_device *nesdev; struct net_device *netdev; struct nes_vnic *nesvnic; + unsigned int is_bonded; nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n", &ifa->ifa_address, &ifa->ifa_mask); @@ -152,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier, nesdev, nesdev->netdev[0]->name); netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); - if (netdev == event_netdev) { + is_bonded = (netdev->master == event_netdev); + if ((netdev == event_netdev) || is_bonded) { if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" " RDMA is not enabled.\n", @@ -169,7 +171,10 @@ static int nes_inetaddr_event(struct notifier_block *notifier, nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE); nesvnic->local_ipaddr = 0; - return NOTIFY_OK; + if (is_bonded) + continue; + else + return NOTIFY_OK; break; case NETDEV_UP: nes_debug(NES_DBG_NETDEV, "event:UP\n"); @@ -178,15 +183,24 @@ static int nes_inetaddr_event(struct notifier_block *notifier, nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n"); return NOTIFY_OK; } + /* fall through */ + case NETDEV_CHANGEADDR: /* Add the address to the IP table */ - nesvnic->local_ipaddr = ifa->ifa_address; + if (netdev->master) + nesvnic->local_ipaddr = + ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address; + else + nesvnic->local_ipaddr = ifa->ifa_address; nes_write_indexed(nesdev, NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), - ntohl(ifa->ifa_address)); + ntohl(nesvnic->local_ipaddr)); nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_ADD); - return NOTIFY_OK; + if (is_bonded) + continue; + else + return NOTIFY_OK; break; default: break; @@ -660,6 +674,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i } nes_notifiers_registered++; + INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); + /* Initialize network devices */ if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) goto bail7; @@ -742,6 +758,7 @@ static void __devexit nes_remove(struct pci_dev *pcidev) struct nes_device *nesdev = pci_get_drvdata(pcidev); struct net_device *netdev; int netdev_index = 0; + unsigned long flags; if (nesdev->netdev_count) { netdev = nesdev->netdev[netdev_index]; @@ -768,6 +785,14 @@ static void __devexit nes_remove(struct pci_dev *pcidev) free_irq(pcidev->irq, nesdev); tasklet_kill(&nesdev->dpc_tasklet); + spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); + if (nesdev->link_recheck) { + spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); + cancel_delayed_work_sync(&nesdev->work); + } else { + spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); + } + /* Deallocate the Adapter Structure */ nes_destroy_adapter(nesdev->nesadapter); diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index b3d145e82b4c..6fe79876009e 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -268,6 +268,9 @@ struct nes_device { u8 napi_isr_ran; u8 disable_rx_flow_control; u8 disable_tx_flow_control; + + struct delayed_work work; + u8 link_recheck; }; @@ -507,6 +510,7 @@ void 
nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *); void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); int nes_destroy_cqp(struct nes_device *); int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); +void nes_recheck_link_status(struct work_struct *work); /* nes_nic.c */ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 25ad0f9944c0..009ec814d517 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1107,6 +1107,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi struct flowi fl; struct neighbour *neigh; int rc = arpindex; + struct net_device *netdev; struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; memset(&fl, 0, sizeof fl); @@ -1117,7 +1118,12 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi return rc; } - neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev); + if (nesvnic->netdev->master) + netdev = nesvnic->netdev->master; + else + netdev = nesvnic->netdev; + + neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); if (neigh) { if (neigh->nud_state & NUD_VALID) { nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 1980a461c499..8b606fd64022 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2608,6 +2608,13 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_start_queue(nesvnic->netdev); nesvnic->linkup = 1; netif_carrier_on(nesvnic->netdev); + + spin_lock(&nesvnic->port_ibevent_lock); + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } + spin_unlock(&nesvnic->port_ibevent_lock); } } } else { @@ -2633,9 +2640,23 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_stop_queue(nesvnic->netdev); nesvnic->linkup = 0; netif_carrier_off(nesvnic->netdev); + + spin_lock(&nesvnic->port_ibevent_lock); + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } + spin_unlock(&nesvnic->port_ibevent_lock); } } } + if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) { + if (nesdev->link_recheck) + cancel_delayed_work(&nesdev->work); + nesdev->link_recheck = 1; + schedule_delayed_work(&nesdev->work, + NES_LINK_RECHECK_DELAY); + } } spin_unlock_irqrestore(&nesadapter->phy_lock, flags); @@ -2643,6 +2664,80 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; } +void nes_recheck_link_status(struct work_struct *work) +{ + unsigned long flags; + struct nes_device *nesdev = container_of(work, struct nes_device, work.work); + struct nes_adapter *nesadapter = nesdev->nesadapter; + struct nes_vnic *nesvnic; + u32 mac_index = nesdev->mac_index; + u16 phy_data; + u16 temp_phy_data; + + spin_lock_irqsave(&nesadapter->phy_lock, flags); + + /* check link status */ + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); + nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); + phy_data = (u16)nes_read_indexed(nesdev, 
NES_IDX_MAC_MDIO_CONTROL); + + phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; + + nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", + __func__, phy_data, + nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP"); + + if (phy_data & 0x0004) { + nesadapter->mac_link_down[mac_index] = 0; + list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { + if (nesvnic->linkup == 0) { + printk(PFX "The Link is now up for port %s, netdev %p.\n", + nesvnic->netdev->name, nesvnic->netdev); + if (netif_queue_stopped(nesvnic->netdev)) + netif_start_queue(nesvnic->netdev); + nesvnic->linkup = 1; + netif_carrier_on(nesvnic->netdev); + + spin_lock(&nesvnic->port_ibevent_lock); + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } + spin_unlock(&nesvnic->port_ibevent_lock); + } + } + + } else { + nesadapter->mac_link_down[mac_index] = 1; + list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { + if (nesvnic->linkup == 1) { + printk(PFX "The Link is now down for port %s, netdev %p.\n", + nesvnic->netdev->name, nesvnic->netdev); + if (!(netif_queue_stopped(nesvnic->netdev))) + netif_stop_queue(nesvnic->netdev); + nesvnic->linkup = 0; + netif_carrier_off(nesvnic->netdev); + + spin_lock(&nesvnic->port_ibevent_lock); + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } + spin_unlock(&nesvnic->port_ibevent_lock); + } + } + } + if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX) + schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); + else + nesdev->link_recheck = 0; + + spin_unlock_irqrestore(&nesadapter->phy_lock, flags); +} static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1204c3432b63..d2abe07133a5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -1193,6 +1193,8 @@ struct nes_listener { struct nes_ib_device; +#define NES_EVENT_DELAY msecs_to_jiffies(100) + struct nes_vnic { struct nes_ib_device *nesibdev; u64 sq_full; @@ -1247,6 +1249,10 @@ struct nes_vnic { u32 lro_max_aggr; struct net_lro_mgr lro_mgr; struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS]; + struct timer_list event_timer; + enum ib_event_type delayed_event; + enum ib_event_type last_dispatched_event; + spinlock_t port_ibevent_lock; }; struct nes_ib_device { @@ -1348,6 +1354,10 @@ struct nes_terminate_hdr { #define BAD_FRAME_OFFSET 64 #define CQE_MAJOR_DRV 0x8000 +/* Used for link status recheck after interrupt processing */ +#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50) +#define NES_LINK_RECHECK_MAX 60 + #define nes_vlan_rx vlan_hwaccel_receive_skb #define nes_netif_rx netif_receive_skb diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 3892e2c0e95a..2c9c1933bbe3 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -144,6 +144,7 @@ static int nes_netdev_open(struct net_device *netdev) u32 nic_active_bit; u32 nic_active; struct list_head *list_pos, *list_temp; + unsigned long flags; assert(nesdev != NULL); @@ -233,18 +234,36 @@ static int nes_netdev_open(struct net_device *netdev) first_nesvnic = nesvnic; } - if (nesvnic->of_device_registered) { - nesdev->iw_status = 1; - nesdev->nesadapter->send_term_ok = 1; - nes_port_ibevent(nesvnic); - } - if (first_nesvnic->linkup) { /* Enable network packets */ nesvnic->linkup = 1; 
netif_start_queue(netdev); netif_carrier_on(netdev); } + + spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); + if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) { + if (nesdev->link_recheck) + cancel_delayed_work(&nesdev->work); + nesdev->link_recheck = 1; + schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); + } + spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); + + spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags); + if (nesvnic->of_device_registered) { + nesdev->nesadapter->send_term_ok = 1; + if (nesvnic->linkup == 1) { + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } + } else { + nesdev->iw_status = 0; + } + } + spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags); + napi_enable(&nesvnic->napi); nesvnic->netdev_open = 1; @@ -263,6 +282,7 @@ static int nes_netdev_stop(struct net_device *netdev) u32 nic_active; struct nes_vnic *first_nesvnic = NULL; struct list_head *list_pos, *list_temp; + unsigned long flags; nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n", nesvnic, nesdev, netdev, netdev->name); @@ -315,12 +335,17 @@ static int nes_netdev_stop(struct net_device *netdev) nic_active &= nic_active_mask; nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); - + spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags); if (nesvnic->of_device_registered) { nesdev->nesadapter->send_term_ok = 0; nesdev->iw_status = 0; - nes_port_ibevent(nesvnic); + if (nesvnic->linkup == 1) + nes_port_ibevent(nesvnic); } + del_timer_sync(&nesvnic->event_timer); + nesvnic->event_timer.function = NULL; + spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags); + nes_destroy_nic_qp(nesvnic); nesvnic->netdev_open = 0; @@ -908,8 +933,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) nesvnic->nic_index && mc_index < max_pft_entries_avaiable) { nes_debug(NES_DBG_NIC_RX, - "mc_index=%d skipping nic_index=%d,\ - used for=%d \n", mc_index, + "mc_index=%d skipping nic_index=%d, " + "used for=%d \n", mc_index, nesvnic->nic_index, nesadapter->pft_mcast_map[mc_index]); mc_index++; @@ -1750,7 +1775,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, nesvnic->rdma_enabled = 0; } nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id; + init_timer(&nesvnic->event_timer); + nesvnic->event_timer.function = NULL; spin_lock_init(&nesvnic->tx_lock); + spin_lock_init(&nesvnic->port_ibevent_lock); nesdev->netdev[nesdev->netdev_count] = netdev; nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n", @@ -1763,8 +1791,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { u32 u32temp; - u32 link_mask; - u32 link_val; + u32 link_mask = 0; + u32 link_val = 0; + u16 temp_phy_data; + u16 phy_data = 0; + unsigned long flags; u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); @@ -1786,6 +1817,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, link_val = 0x02020000; } break; + case NES_PHY_TYPE_SFP_D: + spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); + nes_read_10G_phy_reg(nesdev, + nesdev->nesadapter->phy_index[nesdev->mac_index], + 1, 0x9003); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + nes_read_10G_phy_reg(nesdev, + nesdev->nesadapter->phy_index[nesdev->mac_index], + 3, 
0x0021); + nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + nes_read_10G_phy_reg(nesdev, + nesdev->nesadapter->phy_index[nesdev->mac_index], + 3, 0x0021); + phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); + phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; + break; default: link_mask = 0x0f1f0000; link_val = 0x0f0f0000; @@ -1795,8 +1843,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); - if ((u32temp & link_mask) == link_val) - nesvnic->linkup = 1; + + if (phy_type == NES_PHY_TYPE_SFP_D) { + if (phy_data & 0x0004) + nesvnic->linkup = 1; + } else { + if ((u32temp & link_mask) == link_val) + nesvnic->linkup = 1; + } /* clear the MAC interrupt status, assumes direct logical to physical mapping */ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 99933e4e48ff..26d8018c0a7c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3936,6 +3936,30 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) return nesibdev; } + +/** + * nes_handle_delayed_event + */ +static void nes_handle_delayed_event(unsigned long data) +{ + struct nes_vnic *nesvnic = (void *) data; + + if (nesvnic->delayed_event != nesvnic->last_dispatched_event) { + struct ib_event event; + + event.device = &nesvnic->nesibdev->ibdev; + if (!event.device) + goto stop_timer; + event.event = nesvnic->delayed_event; + event.element.port_num = nesvnic->logical_port + 1; + ib_dispatch_event(&event); + } + +stop_timer: + nesvnic->event_timer.function = NULL; +} + + void nes_port_ibevent(struct nes_vnic *nesvnic) { struct nes_ib_device *nesibdev = nesvnic->nesibdev; @@ -3944,7 +3968,18 @@ void nes_port_ibevent(struct nes_vnic *nesvnic) event.device = &nesibdev->ibdev; event.element.port_num = nesvnic->logical_port + 1; event.event = nesdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; - ib_dispatch_event(&event); + + if (!nesvnic->event_timer.function) { + ib_dispatch_event(&event); + nesvnic->last_dispatched_event = event.event; + nesvnic->event_timer.function = nes_handle_delayed_event; + nesvnic->event_timer.data = (unsigned long) nesvnic; + nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY; + add_timer(&nesvnic->event_timer); + } else { + mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY); + } + nesvnic->delayed_event = event.event; } diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 64c9e7d02d4a..73225eee3cc6 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -766,7 +766,7 @@ struct qib_devdata { void (*f_sdma_hw_start_up)(struct qib_pportdata *); void (*f_sdma_init_early)(struct qib_pportdata *); void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); - void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32); + void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32); u32 (*f_hdrqempty)(struct qib_ctxtdata *); u64 (*f_portcntr)(struct qib_pportdata *, u32); u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c index a86cbf880f98..5246aa486bbe 100644 --- a/drivers/infiniband/hw/qib/qib_cq.c +++ b/drivers/infiniband/hw/qib/qib_cq.c @@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) wc->head = next; if (cq->notify == IB_CQ_NEXT_COMP || - (cq->notify == IB_CQ_SOLICITED && solicited)) { + (cq->notify == IB_CQ_SOLICITED && + (solicited || entry->status != IB_WC_SUCCESS))) { cq->notify = IB_CQ_NONE; cq->triggered++; /* diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 9cd193603fb1..23e584f4c36c 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver"); */ #define QIB_PIO_MAXIBHDR 128 +/* + * QIB_MAX_PKT_RCV is the max # if packets processed per receive interrupt. + */ +#define QIB_MAX_PKT_RECV 64 + struct qlogic_ib_stats qib_stats; const char *qib_get_unit_name(int unit) @@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) * Returns 1 if error was a CRC, else 0. * Needed for some chip's synthesized error counters. 
*/ -static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt, - u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, - struct qib_message_header *hdr) +static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, + u32 ctxt, u32 eflags, u32 l, u32 etail, + __le32 *rhf_addr, struct qib_message_header *rhdr) { u32 ret = 0; if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) ret = 1; + else if (eflags == QLOGIC_IB_RHF_H_TIDERR) { + /* For TIDERR and RC QPs premptively schedule a NAK */ + struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr; + struct qib_other_headers *ohdr = NULL; + struct qib_ibport *ibp = &ppd->ibport_data; + struct qib_qp *qp = NULL; + u32 tlen = qib_hdrget_length_in_bytes(rhf_addr); + u16 lid = be16_to_cpu(hdr->lrh[1]); + int lnh = be16_to_cpu(hdr->lrh[0]) & 3; + u32 qp_num; + u32 opcode; + u32 psn; + int diff; + unsigned long flags; + + /* Sanity check packet */ + if (tlen < 24) + goto drop; + + if (lid < QIB_MULTICAST_LID_BASE) { + lid &= ~((1 << ppd->lmc) - 1); + if (unlikely(lid != ppd->lid)) + goto drop; + } + + /* Check for GRH */ + if (lnh == QIB_LRH_BTH) + ohdr = &hdr->u.oth; + else if (lnh == QIB_LRH_GRH) { + u32 vtf; + + ohdr = &hdr->u.l.oth; + if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) + goto drop; + vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); + if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) + goto drop; + } else + goto drop; + + /* Get opcode and PSN from packet */ + opcode = be32_to_cpu(ohdr->bth[0]); + opcode >>= 24; + psn = be32_to_cpu(ohdr->bth[2]); + + /* Get the destination QP number. */ + qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; + if (qp_num != QIB_MULTICAST_QPN) { + int ruc_res; + qp = qib_lookup_qpn(ibp, qp_num); + if (!qp) + goto drop; + + /* + * Handle only RC QPs - for other QP types drop error + * packet. + */ + spin_lock(&qp->r_lock); + + /* Check for valid receive state. */ + if (!(ib_qib_state_ops[qp->state] & + QIB_PROCESS_RECV_OK)) { + ibp->n_pkt_drops++; + goto unlock; + } + + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + spin_lock_irqsave(&qp->s_lock, flags); + ruc_res = + qib_ruc_check_hdr( + ibp, hdr, + lnh == QIB_LRH_GRH, + qp, + be32_to_cpu(ohdr->bth[0])); + if (ruc_res) { + spin_unlock_irqrestore(&qp->s_lock, + flags); + goto unlock; + } + spin_unlock_irqrestore(&qp->s_lock, flags); + + /* Only deal with RDMA Writes for now */ + if (opcode < + IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { + diff = qib_cmp24(psn, qp->r_psn); + if (!qp->r_nak_state && diff >= 0) { + ibp->n_rc_seqnak++; + qp->r_nak_state = + IB_NAK_PSN_ERROR; + /* Use the expected PSN. */ + qp->r_ack_psn = qp->r_psn; + /* + * Wait to send the sequence + * NAK until all packets + * in the receive queue have + * been processed. + * Otherwise, we end up + * propagating congestion. + */ + if (list_empty(&qp->rspwait)) { + qp->r_flags |= + QIB_R_RSP_NAK; + atomic_inc( + &qp->refcount); + list_add_tail( + &qp->rspwait, + &rcd->qp_wait_list); + } + } /* Out of sequence NAK */ + } /* QP Request NAKs */ + break; + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_UD: + case IB_QPT_UC: + default: + /* For now don't handle any other QP types */ + break; + } + +unlock: + spin_unlock(&qp->r_lock); + /* + * Notify qib_destroy_qp() if it is waiting + * for us to finish. 
+ */ + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } /* Unicast QP */ + } /* Valid packet with TIDErr */ + +drop: return ret; } @@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ } - for (last = 0, i = 1; !last && i <= 64; i += !last) { + for (last = 0, i = 1; !last; i += !last) { hdr = dd->f_get_msgheader(dd, rhf_addr); eflags = qib_hdrget_err_flags(rhf_addr); etype = qib_hdrget_rcv_type(rhf_addr); @@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) * packets; only qibhdrerr should be set. */ if (unlikely(eflags)) - crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l, + crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, etail, rhf_addr, hdr); else if (etype == RCVHQ_RCV_TYPE_NON_KD) { qib_ib_rcv(rcd, hdr, ebuf, tlen); @@ -384,6 +522,9 @@ move_along: l += rsize; if (l >= maxcnt) l = 0; + if (i == QIB_MAX_PKT_RECV) + last = 1; + rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; if (dd->flags & QIB_NODMA_RTAIL) { u32 seq = qib_hdrget_seq(rhf_addr); @@ -402,7 +543,7 @@ move_along: */ lval = l; if (!last && !(i & 0xf)) { - dd->f_update_usrhead(rcd, lval, updegr, etail); + dd->f_update_usrhead(rcd, lval, updegr, etail, i); updegr = 0; } } @@ -444,7 +585,7 @@ bail: * if no packets were processed. */ lval = (u64)rcd->head | dd->rhdrhead_intr_off; - dd->f_update_usrhead(rcd, lval, updegr, etail); + dd->f_update_usrhead(rcd, lval, updegr, etail, i); return crcs; } diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 79d9971aff1f..75bfad16c114 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, /* find device (with ACTIVE ports) with fewest ctxts in use */ for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); - unsigned cused = 0, cfree = 0; + unsigned cused = 0, cfree = 0, pusable = 0; if (!dd) continue; if (port && port <= dd->num_pports && usable(dd->pport + port - 1)) - dusable = 1; + pusable = 1; else for (i = 0; i < dd->num_pports; i++) if (usable(dd->pport + i)) - dusable++; - if (!dusable) + pusable++; + if (!pusable) continue; for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) @@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, cused++; else cfree++; - if (cfree && cused < inuse) { + if (pusable && cfree && cused < inuse) { udd = dd; inuse = cused; } diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index f99bddc01716..df7fa251dcdc 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -453,17 +453,14 @@ static int remove_file(struct dentry *parent, char *name) goto bail; } - spin_lock(&dcache_lock); spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { - dget_locked(tmp); + dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); simple_unlink(parent->d_inode, tmp); } else { spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); } ret = 0; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a5e29dbb9537..774dea897e9c 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd) } 
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, - u32 updegr, u32 egrhd) + u32 updegr, u32 egrhd, u32 npkts) { qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 6fd8d74e7392..de799f17cb9e 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd) ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; spin_unlock_irqrestore(&ppd->lflags_lock, flags); wake_up(&ppd->cpspec->autoneg_wait); - cancel_delayed_work(&ppd->cpspec->autoneg_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&ppd->cpspec->autoneg_work); shutdown_7220_relock_poll(ppd->dd); val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg); @@ -2297,7 +2296,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd) nchipctxts = qib_read_kreg32(dd, kr_portcnt); dd->cspec->numctxts = nchipctxts; if (qib_n_krcv_queues > 1) { - dd->qpn_mask = 0x3f; + dd->qpn_mask = 0x3e; dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; if (dd->first_user_ctxt > nchipctxts) dd->first_user_ctxt = nchipctxts; @@ -2703,7 +2702,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) } static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, - u32 updegr, u32 egrhd) + u32 updegr, u32 egrhd, u32 npkts) { qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) @@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd) toggle_7220_rclkrls(ppd->dd); /* 2 msec is minimum length of a poll cycle */ - schedule_delayed_work(&ppd->cpspec->autoneg_work, - msecs_to_jiffies(2)); + queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work, + msecs_to_jiffies(2)); } /* diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 584d443b5335..50cceb3ab885 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *); static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); +static void serdes_7322_los_enable(struct qib_pportdata *, int); +static int serdes_7322_init_old(struct qib_pportdata *); +static int serdes_7322_init_new(struct qib_pportdata *); #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) @@ -111,6 +114,21 @@ static ushort qib_singleport; module_param_named(singleport, qib_singleport, ushort, S_IRUGO); MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); +/* + * Receive header queue sizes + */ +static unsigned qib_rcvhdrcnt; +module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO); +MODULE_PARM_DESC(rcvhdrcnt, "receive header count"); + +static unsigned qib_rcvhdrsize; +module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO); +MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words"); + +static unsigned qib_rcvhdrentsize; +module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO); +MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words"); + #define MAX_ATTEN_LEN 64 /* plenty for any real system */ /* for read back, default index is ~5m copper cable */ static char txselect_list[MAX_ATTEN_LEN] = "10"; @@ -314,7 +332,7 @@ MODULE_PARM_DESC(txselect, \ #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) /* - * 
Per-context kernel registers. Acess only with qib_read_kreg_ctxt() + * Per-context kernel registers. Access only with qib_read_kreg_ctxt() * or qib_write_kreg_ctxt() */ #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0) @@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ +#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */ #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ #define H1_FORCE_VAL 8 @@ -604,6 +623,7 @@ struct qib_chippport_specific { u8 ibmalfusesnap; struct qib_qsfp_data qsfp_data; char epmsgbuf[192]; /* for port error interrupt msg buffer */ + u8 bounced; }; static struct { @@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { force_h1(ppd); ppd->cpspec->qdr_reforce = 1; + if (!ppd->dd->cspec->r1) + serdes_7322_los_enable(ppd, 0); } else if (ppd->cpspec->qdr_reforce && (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && (ibclt == IB_7322_LT_STATE_CFGENH || @@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) adj_tx_serdes(ppd); - if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && - ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { - ppd->cpspec->qdr_dfe_on = 1; - ppd->cpspec->qdr_dfe_time = 0; - /* On link down, reenable QDR adaptation */ - qib_write_kreg_port(ppd, krp_static_adapt_dis(2), - ppd->dd->cspec->r1 ? - QDR_STATIC_ADAPT_DOWN_R1 : - QDR_STATIC_ADAPT_DOWN); + if (ibclt != IB_7322_LT_STATE_LINKUP) { + u8 ltstate = qib_7322_phys_portstate(ibcst); + u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, + LinkTrainingState); + if (!ppd->dd->cspec->r1 && + pibclt == IB_7322_LT_STATE_LINKUP && + ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && + ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && + ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && + ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) + /* If the link went down (but no into recovery, + * turn LOS back on */ + serdes_7322_los_enable(ppd, 1); + if (!ppd->cpspec->qdr_dfe_on && + ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { + ppd->cpspec->qdr_dfe_on = 1; + ppd->cpspec->qdr_dfe_time = 0; + /* On link down, reenable QDR adaptation */ + qib_write_kreg_port(ppd, krp_static_adapt_dis(2), + ppd->dd->cspec->r1 ? + QDR_STATIC_ADAPT_DOWN_R1 : + QDR_STATIC_ADAPT_DOWN); + printk(KERN_INFO QIB_DRV_NAME + " IB%u:%u re-enabled QDR adaptation " + "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt); + } } } +static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32); + /* * This is per-pport error handling. * will likely get it's own MSIx interrupt (one for each port, @@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) IB_PHYSPORTSTATE_DISABLED) qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); - else + else { + u32 lstate; + /* + * We need the current logical link state before + * lflags are set in handle_e_ibstatuschanged. 
+ */ + lstate = qib_7322_iblink_state(ibcs); + + if (IS_QMH(dd) && !ppd->cpspec->bounced && + ltstate == IB_PHYSPORTSTATE_LINKUP && + (lstate >= IB_PORT_INIT && + lstate <= IB_PORT_ACTIVE)) { + ppd->cpspec->bounced = 1; + qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, + IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL); + } + /* * Since going into a recovery state causes the link * state to go down and since recovery is transitory, @@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) qib_handle_e_ibstatuschanged(ppd, ibcs); + } } if (*msg && iserr) qib_dev_porterr(dd, ppd->port, "%s error\n", msg); @@ -2348,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd) ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; spin_unlock_irqrestore(&ppd->lflags_lock, flags); wake_up(&ppd->cpspec->autoneg_wait); - cancel_delayed_work(&ppd->cpspec->autoneg_work); + cancel_delayed_work_sync(&ppd->cpspec->autoneg_work); if (ppd->dd->cspec->r1) - cancel_delayed_work(&ppd->cpspec->ipg_work); - flush_scheduled_work(); + cancel_delayed_work_sync(&ppd->cpspec->ipg_work); ppd->cpspec->chase_end = 0; if (ppd->cpspec->chase_timer.data) /* if initted */ @@ -2648,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) if (!(pins & mask)) { ++handled; qd->t_insert = get_jiffies_64(); - schedule_work(&qd->work); + queue_work(ib_wq, &qd->work); } } } @@ -2785,7 +2842,6 @@ static irqreturn_t qib_7322intr(int irq, void *data) ctxtrbits &= ~rmask; if (dd->rcd[i]) { qib_kreceive(dd->rcd[i], NULL, &npkts); - adjust_rcv_timeout(dd->rcd[i], npkts); } } rmask <<= 1; @@ -2835,7 +2891,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data) (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); qib_kreceive(rcd, NULL, &npkts); - adjust_rcv_timeout(rcd, npkts); return IRQ_HANDLED; } @@ -3157,6 +3212,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) case BOARD_QME7342: n = "InfiniPath_QME7342"; break; + case 8: + n = "InfiniPath_QME7362"; + dd->flags |= QIB_HAS_QSFP; + break; case 15: n = "InfiniPath_QLE7342_TEST"; dd->flags |= QIB_HAS_QSFP; @@ -3475,11 +3534,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd) nchipctxts = qib_read_kreg32(dd, kr_contextcnt); dd->cspec->numctxts = nchipctxts; if (qib_n_krcv_queues > 1 && dd->num_pports) { - /* - * Set the mask for which bits from the QPN are used - * to select a context number. - */ - dd->qpn_mask = 0x3f; dd->first_user_ctxt = NUM_IB_PORTS + (qib_n_krcv_queues - 1) * dd->num_pports; if (dd->first_user_ctxt > nchipctxts) @@ -3530,8 +3584,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd) /* kr_rcvegrcnt changes based on the number of contexts enabled */ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); - dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, - dd->num_pports > 1 ? 1024U : 2048U); + if (qib_rcvhdrcnt) + dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); + else + dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, + dd->num_pports > 1 ? 1024U : 2048U); } static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) @@ -4002,8 +4059,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) } static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, - u32 updegr, u32 egrhd) + u32 updegr, u32 egrhd, u32 npkts) { + /* + * Need to write timeout register before updating rcvhdrhead to ensure + * that the timer is enabled on reception of a packet. 
+ */ + if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) + adjust_rcv_timeout(rcd, npkts); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) @@ -4926,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd) set_7322_ibspeed_fast(ppd, QIB_IB_DDR); qib_7322_mini_pcs_reset(ppd); /* 2 msec is minimum length of a poll cycle */ - schedule_delayed_work(&ppd->cpspec->autoneg_work, - msecs_to_jiffies(2)); + queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work, + msecs_to_jiffies(2)); } /* @@ -5057,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd) ib_free_send_mad(send_buf); retry: delay = 2 << ppd->cpspec->ipg_tries; - schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay)); + queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work, + msecs_to_jiffies(delay)); } /* @@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work) u64 now = get_jiffies_64(); if (time_after64(now, pwrup)) break; - msleep(1); + msleep(20); } ret = qib_refresh_qsfp_cache(ppd, &qd->cache); /* @@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) u32 pidx, unit, port, deflt, h1; unsigned long val; int any = 0, seth1; + int txdds_size; str = txselect_list; @@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->pport[pidx].cpspec->no_eep = deflt; + txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ; + if (IS_QME(dd) || IS_QMH(dd)) + txdds_size += TXDDS_MFG_SZ; + while (*nxt && nxt[1]) { str = ++nxt; unit = simple_strtoul(str, &nxt, 0); @@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) ; continue; } - if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) + if (val >= txdds_size) continue; seth1 = 0; h1 = 0; /* gcc thinks it might be used uninitted */ @@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp) return -ENOSPC; } val = simple_strtoul(str, &n, 0); - if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { + if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + + TXDDS_MFG_SZ)) { printk(KERN_INFO QIB_DRV_NAME "txselect_values must start with a number < %d\n", - TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); + TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); return -EINVAL; } strcpy(txselect_list, str); @@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd) unsigned n, regno; unsigned long flags; - if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) + if (dd->n_krcv_queues < 2 || + !dd->pport[pidx].link_speed_supported) continue; ppd = &dd->pport[pidx]; @@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd) ppd++; } - dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; - dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; + dd->rcvhdrentsize = qib_rcvhdrentsize ? + qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE; + dd->rcvhdrsize = qib_rcvhdrsize ? 
+ qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE; dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); /* we always allocate at least 2048 bytes for eager buffers */ @@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, /* make sure we see an updated copy next time around */ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); sleeps++; - msleep(1); + msleep(20); } switch (which) { @@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ }; +static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { + /* amp, pre, main, post */ + { 0, 0, 0, 0 }, /* QME7342 mfg settings */ + { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */ +}; + static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, unsigned atten) { @@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd, *sdr_dds = &txdds_extra_sdr[idx]; *ddr_dds = &txdds_extra_ddr[idx]; *qdr_dds = &txdds_extra_qdr[idx]; + } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && + ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + + TXDDS_MFG_SZ)) { + idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); + printk(KERN_INFO QIB_DRV_NAME + " IB%u:%u use idx %u into txdds_mfg\n", + ppd->dd->unit, ppd->port, idx); + *sdr_dds = &txdds_extra_mfg[idx]; + *ddr_dds = &txdds_extra_mfg[idx]; + *qdr_dds = &txdds_extra_mfg[idx]; } else { /* this shouldn't happen, it's range checked */ *sdr_dds = txdds_sdr + qib_long_atten; @@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, } } +static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) +{ + u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); + printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n", + ppd->dd->unit, ppd->port, (enable ? "on" : "off")); + if (enable) + data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); + else + data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); + qib_write_kreg_port(ppd, krp_serdesctrl, data); +} + static int serdes_7322_init(struct qib_pportdata *ppd) { - u64 data; + int ret = 0; + if (ppd->dd->cspec->r1) + ret = serdes_7322_init_old(ppd); + else + ret = serdes_7322_init_new(ppd); + return ret; +} + +static int serdes_7322_init_old(struct qib_pportdata *ppd) +{ u32 le_val; /* @@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ - data = qib_read_kreg_port(ppd, krp_serdesctrl); - /* Turn off IB latency mode */ - data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); - qib_write_kreg_port(ppd, krp_serdesctrl, data | - SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); + serdes_7322_los_enable(ppd, 1); /* rxbistena; set 0 to avoid effects of it switch later */ ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); @@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd) return 0; } +static int serdes_7322_init_new(struct qib_pportdata *ppd) +{ + u64 tstart; + u32 le_val, rxcaldone; + int chan, chan_done = (1 << SERDES_CHANS) - 1; + + /* + * Initialize the Tx DDS tables. 
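The TXDDS_MFG_SZ additions above widen the index space accepted by the txselect module parameter on QME/QMH boards. A short sketch of how an index is partitioned across the three Tx DDS tables; the table sizes match the patch, everything else (function name, fallback string) is illustrative.

/* Sketch of txselect index -> Tx DDS table selection after TXDDS_MFG_SZ. */
#include <stdio.h>

#define TXDDS_TABLE_SZ 16	/* per-speed on-chip attenuation table */
#define TXDDS_EXTRA_SZ 13	/* extra tx settings */
#define TXDDS_MFG_SZ    2	/* QME7342 manufacturing settings */

static const char *table_for(unsigned idx, int is_qme_or_qmh)
{
	if (idx < TXDDS_TABLE_SZ)
		return "main (atten) table";
	if (idx < TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
		return "txdds_extra_*";
	if (is_qme_or_qmh && idx < TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)
		return "txdds_extra_mfg";
	return "out of range";	/* driver falls back to a default entry */
}

int main(void)
{
	printf("idx 10 -> %s\n", table_for(10, 1));
	printf("idx 20 -> %s\n", table_for(20, 1));
	printf("idx 29 -> %s\n", table_for(29, 1));	/* first mfg entry */
	printf("idx 29 -> %s\n", table_for(29, 0));	/* rejected on non-QME/QMH */
	return 0;
}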
Also done every QSFP event, + * for adapters with QSFP + */ + init_txdds_table(ppd, 0); + + /* Clear cmode-override, may be set from older driver */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); + + /* ensure no tx overrides from earlier driver loads */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + + /* START OF LSI SUGGESTED SERDES BRINGUP */ + /* Reset - Calibration Setup */ + /* Stop DFE adaptaion */ + ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); + /* Disable LE1 */ + ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); + /* Disable autoadapt for LE1 */ + ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); + /* Disable LE2 */ + ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); + /* Disable VGA */ + ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); + /* Disable AFE Offset Cancel */ + ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); + /* Disable Timing Loop */ + ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); + /* Disable Frequency Loop */ + ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); + /* Disable Baseline Wander Correction */ + ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); + /* Disable RX Calibration */ + ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); + /* Disable RX Offset Calibration */ + ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); + /* Select BB CDR */ + ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); + /* CDR Step Size */ + ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); + /* Enable phase Calibration */ + ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); + /* DFE Bandwidth [2:14-12] */ + ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); + /* DFE Config (4 taps only) */ + ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); + /* Gain Loop Bandwidth */ + if (!ppd->dd->cspec->r1) { + ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); + ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); + } else { + ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); + } + /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ + /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ + /* Data Rate Select [5:7-6] (leave as default) */ + /* RX Parralel Word Width [3:10-8] (leave as default) */ + + /* RX REST */ + /* Single- or Multi-channel reset */ + /* RX Analog reset */ + /* RX Digital reset */ + ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); + msleep(20); + /* RX Analog reset */ + ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); + msleep(20); + /* RX Digital reset */ + ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); + msleep(20); + + /* setup LoS params; these are subsystem, so chan == 5 */ + /* LoS filter threshold_count on, ch 0-3, set to 8 */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); + + /* LoS filter threshold_count off, ch 0-3, set to 4 */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); + + /* LoS filter select enabled */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); + + /* LoS target data: SDR=4, DDR=2, QDR=1 */ + ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ + ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ + ibsd_wr_allchans(ppd, 20, (4 << 
13), BMASK(15, 13)); /* SDR */ + + /* Turn on LOS on initial SERDES init */ + serdes_7322_los_enable(ppd, 1); + /* FLoop LOS gate: PPM filter enabled */ + ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); + + /* RX LATCH CALIBRATION */ + /* Enable Eyefinder Phase Calibration latch */ + ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); + /* Enable RX Offset Calibration latch */ + ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); + msleep(20); + /* Start Calibration */ + ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); + tstart = get_jiffies_64(); + while (chan_done && + !time_after64(tstart, tstart + msecs_to_jiffies(500))) { + msleep(20); + for (chan = 0; chan < SERDES_CHANS; ++chan) { + rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), + (chan + (chan >> 1)), + 25, 0, 0); + if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && + (~chan_done & (1 << chan)) == 0) + chan_done &= ~(1 << chan); + } + } + if (chan_done) { + printk(KERN_INFO QIB_DRV_NAME + " Serdes %d calibration not done after .5 sec: 0x%x\n", + IBSD(ppd->hw_pidx), chan_done); + } else { + for (chan = 0; chan < SERDES_CHANS; ++chan) { + rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), + (chan + (chan >> 1)), + 25, 0, 0); + if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) + printk(KERN_INFO QIB_DRV_NAME + " Serdes %d chan %d calibration " + "failed\n", IBSD(ppd->hw_pidx), chan); + } + } + + /* Turn off Calibration */ + ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); + msleep(20); + + /* BRING RX UP */ + /* Set LE2 value (May be overridden in qsfp_7322_event) */ + le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; + ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); + /* Set LE2 Loop bandwidth */ + ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); + /* Enable LE2 */ + ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); + msleep(20); + /* Enable H0 only */ + ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); + /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ + le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; + ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); + /* Enable VGA */ + ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); + msleep(20); + /* Set Frequency Loop Bandwidth */ + ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5)); + /* Enable Frequency Loop */ + ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); + /* Set Timing Loop Bandwidth */ + ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); + /* Enable Timing Loop */ + ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); + msleep(50); + /* Enable DFE + * Set receive adaptation mode. SDR and DDR adaptation are + * always on, and QDR is initially enabled; later disabled. + */ + qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); + qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); + qib_write_kreg_port(ppd, krp_static_adapt_dis(2), + ppd->dd->cspec->r1 ? 
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); + ppd->cpspec->qdr_dfe_on = 1; + /* Disable LE1 */ + ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); + /* Disable auto adapt for LE1 */ + ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); + msleep(20); + /* Enable AFE Offset Cancel */ + ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); + /* Enable Baseline Wander Correction */ + ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); + /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ + ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); + /* VGA output common mode */ + ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); + + return 0; +} + /* start adjust QMH serdes parameters */ static void set_man_code(struct qib_pportdata *ppd, int chan, int code) diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index f3b503936043..ffefb78b8949 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); -struct workqueue_struct *qib_wq; struct workqueue_struct *qib_cq_wq; static void verify_interrupt(unsigned long); @@ -92,9 +91,11 @@ unsigned long *qib_cpulist; /* set number of contexts we'll actually use */ void qib_set_ctxtcnt(struct qib_devdata *dd) { - if (!qib_cfgctxts) + if (!qib_cfgctxts) { dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); - else if (qib_cfgctxts < dd->num_pports) + if (dd->cfgctxts > dd->ctxtcnt) + dd->cfgctxts = dd->ctxtcnt; + } else if (qib_cfgctxts < dd->num_pports) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts <= dd->ctxtcnt) dd->cfgctxts = qib_cfgctxts; @@ -268,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd) struct page **pages; dma_addr_t *addrs; - pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); + pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); if (!pages) { qib_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); goto bail; } - addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); + addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { qib_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); goto bail_free; } - memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); - memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); - dd->pageshadow = pages; dd->physshadow = addrs; return; @@ -1045,24 +1043,10 @@ static int __init qlogic_ib_init(void) if (ret) goto bail; - /* - * We create our own workqueue mainly because we want to be - * able to flush it when devices are being removed. We can't - * use schedule_work()/flush_scheduled_work() because both - * unregister_netdev() and linkwatch_event take the rtnl lock, - * so flush_scheduled_work() can deadlock during device - * removal. 
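The qib_set_ctxtcnt() hunk above adds a clamp so the default context count (kernel contexts plus one per online CPU) can never exceed what the chip reports. A tiny pure-arithmetic sketch of just that default path; field names are simplified and the other branches of the function are omitted.

/* Sketch of the new clamp in qib_set_ctxtcnt()'s default path. */
#include <stdio.h>

static unsigned default_cfgctxts(unsigned first_user_ctxt,
				 unsigned online_cpus, unsigned chip_ctxts)
{
	unsigned want = first_user_ctxt + online_cpus;
	return want > chip_ctxts ? chip_ctxts : want;	/* clamp added by patch */
}

int main(void)
{
	/* 2 kernel contexts + 32 CPUs would exceed an 18-context chip. */
	printf("%u\n", default_cfgctxts(2, 32, 18));	/* -> 18 */
	return 0;
}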
- */ - qib_wq = create_workqueue("qib"); - if (!qib_wq) { - ret = -ENOMEM; - goto bail_dev; - } - qib_cq_wq = create_singlethread_workqueue("qib_cq"); if (!qib_cq_wq) { ret = -ENOMEM; - goto bail_wq; + goto bail_dev; } /* @@ -1092,8 +1076,6 @@ bail_unit: idr_destroy(&qib_unit_table); bail_cq_wq: destroy_workqueue(qib_cq_wq); -bail_wq: - destroy_workqueue(qib_wq); bail_dev: qib_dev_cleanup(); bail: @@ -1117,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void) pci_unregister_driver(&qib_driver); - destroy_workqueue(qib_wq); destroy_workqueue(qib_cq_wq); qib_cpulist_count = 0; @@ -1290,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev, if (qib_mini_init || initfail || ret) { qib_stop_timers(dd); - flush_scheduled_work(); + flush_workqueue(ib_wq); for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->f_quiet_serdes(dd->pport + pidx); if (qib_mini_init) @@ -1339,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev) qib_stop_timers(dd); - /* wait until all of our (qsfp) schedule_work() calls complete */ - flush_scheduled_work(); + /* wait until all of our (qsfp) queue_work() calls complete */ + flush_workqueue(ib_wq); ret = qibfs_remove(dd); if (ret) diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 54a40828a106..a693c56ec8a6 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) /* start a 75msec timer to clear symbol errors */ mod_timer(&ppd->symerr_clear_timer, msecs_to_jiffies(75)); - } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { + } else if (ltstate == IB_PHYSPORTSTATE_LINKUP && + !(ppd->lflags & QIBL_LINKACTIVE)) { /* active, but not active defered */ qib_hol_up(ppd); /* useful only for 6120 now */ *ppd->statusp |= diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 4b80eb153d57..8fd19a47df0c 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, struct qib_mregion *mr; unsigned n, m; size_t off; - int ret = 0; unsigned long flags; /* @@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, if (!dev->dma_mr) goto bail; atomic_inc(&dev->dma_mr->refcount); + spin_unlock_irqrestore(&rkt->lock, flags); + isge->mr = dev->dma_mr; isge->vaddr = (void *) sge->addr; isge->length = sge->length; @@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, off + sge->length > mr->length || (mr->access_flags & acc) != acc)) goto bail; + atomic_inc(&mr->refcount); + spin_unlock_irqrestore(&rkt->lock, flags); off += mr->offset; - m = 0; - n = 0; - while (off >= mr->map[m]->segs[n].length) { - off -= mr->map[m]->segs[n].length; - n++; - if (n >= QIB_SEGSZ) { - m++; - n = 0; + if (mr->page_shift) { + /* + page sizes are uniform power of 2 so no loop is necessary + entries_spanned_by_off is the number of times the loop below + would have executed. 
+ */ + size_t entries_spanned_by_off; + + entries_spanned_by_off = off >> mr->page_shift; + off -= (entries_spanned_by_off << mr->page_shift); + m = entries_spanned_by_off/QIB_SEGSZ; + n = entries_spanned_by_off%QIB_SEGSZ; + } else { + m = 0; + n = 0; + while (off >= mr->map[m]->segs[n].length) { + off -= mr->map[m]->segs[n].length; + n++; + if (n >= QIB_SEGSZ) { + m++; + n = 0; + } } } - atomic_inc(&mr->refcount); isge->mr = mr; isge->vaddr = mr->map[m]->segs[n].vaddr + off; isge->length = mr->map[m]->segs[n].length - off; @@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, isge->m = m; isge->n = n; ok: - ret = 1; + return 1; bail: spin_unlock_irqrestore(&rkt->lock, flags); - return ret; + return 0; } /** @@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, struct qib_mregion *mr; unsigned n, m; size_t off; - int ret = 0; unsigned long flags; /* @@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, if (!dev->dma_mr) goto bail; atomic_inc(&dev->dma_mr->refcount); + spin_unlock_irqrestore(&rkt->lock, flags); + sge->mr = dev->dma_mr; sge->vaddr = (void *) vaddr; sge->length = len; @@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) goto bail; + atomic_inc(&mr->refcount); + spin_unlock_irqrestore(&rkt->lock, flags); off += mr->offset; - m = 0; - n = 0; - while (off >= mr->map[m]->segs[n].length) { - off -= mr->map[m]->segs[n].length; - n++; - if (n >= QIB_SEGSZ) { - m++; - n = 0; + if (mr->page_shift) { + /* + page sizes are uniform power of 2 so no loop is necessary + entries_spanned_by_off is the number of times the loop below + would have executed. + */ + size_t entries_spanned_by_off; + + entries_spanned_by_off = off >> mr->page_shift; + off -= (entries_spanned_by_off << mr->page_shift); + m = entries_spanned_by_off/QIB_SEGSZ; + n = entries_spanned_by_off%QIB_SEGSZ; + } else { + m = 0; + n = 0; + while (off >= mr->map[m]->segs[n].length) { + off -= mr->map[m]->segs[n].length; + n++; + if (n >= QIB_SEGSZ) { + m++; + n = 0; + } } } - atomic_inc(&mr->refcount); sge->mr = mr; sge->vaddr = mr->map[m]->segs[n].vaddr + off; sge->length = mr->map[m]->segs[n].length - off; @@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, sge->m = m; sge->n = n; ok: - ret = 1; + return 1; bail: spin_unlock_irqrestore(&rkt->lock, flags); - return ret; + return 0; } /* diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 94b0d1f3a8f0..5ad224e4a38b 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, lid = be16_to_cpu(pip->lid); /* Must be a valid unicast LID address. */ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) - goto err; - if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { + smp->status |= IB_SMP_INVALID_FIELD; + else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { if (ppd->lid != lid) qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) @@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, msl = pip->neighbormtu_mastersmsl & 0xF; /* Must be a valid unicast LID address. 
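The SubnSet(PortInfo) hunks above change the error-handling shape: instead of "goto err" on the first bad field, the handler flags IB_SMP_INVALID_FIELD and keeps applying the fields that are valid, so the set is no longer abandoned half-applied. A minimal sketch of that accumulate-and-continue pattern; the struct, constants, and validation rules are simplified stand-ins.

/* Sketch of the "flag invalid field, keep going" pattern. */
#include <stdio.h>

#define SMP_INVALID_FIELD	0x04
#define MULTICAST_LID_BASE	0xC000

struct port_cfg { unsigned lid, mtu; };

static unsigned apply_portinfo(struct port_cfg *cfg, unsigned lid, unsigned mtu)
{
	unsigned status = 0;

	if (lid == 0 || lid >= MULTICAST_LID_BASE)
		status |= SMP_INVALID_FIELD;	/* note: no early return */
	else
		cfg->lid = lid;

	if (mtu < 256 || mtu > 4096)
		status |= SMP_INVALID_FIELD;
	else
		cfg->mtu = mtu;

	return status;
}

int main(void)
{
	struct port_cfg cfg = { .lid = 1, .mtu = 2048 };
	unsigned st = apply_portinfo(&cfg, 0 /* bad */, 4096 /* good */);
	printf("status=%#x lid=%u mtu=%u\n", st, cfg.lid, cfg.mtu);
	return 0;
}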
*/ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) - goto err; - if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { + smp->status |= IB_SMP_INVALID_FIELD; + else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { spin_lock_irqsave(&ibp->lock, flags); if (ibp->sm_ah) { if (smlid != ibp->sm_lid) @@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, if (lwe == 0xFF) lwe = ppd->link_width_supported; else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) - goto err; - set_link_width_enabled(ppd, lwe); + smp->status |= IB_SMP_INVALID_FIELD; + else if (lwe != ppd->link_width_enabled) + set_link_width_enabled(ppd, lwe); } lse = pip->linkspeedactive_enabled & 0xF; @@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, if (lse == 15) lse = ppd->link_speed_supported; else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) - goto err; - set_link_speed_enabled(ppd, lse); + smp->status |= IB_SMP_INVALID_FIELD; + else if (lse != ppd->link_speed_enabled) + set_link_speed_enabled(ppd, lse); } /* Set link down default state. */ @@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, IB_LINKINITCMD_POLL); break; default: - goto err; + smp->status |= IB_SMP_INVALID_FIELD; } ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; @@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); if (mtu == -1) - goto err; - qib_set_mtu(ppd, mtu); + smp->status |= IB_SMP_INVALID_FIELD; + else + qib_set_mtu(ppd, mtu); /* Set operational VLs */ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; if (vls) { if (vls > ppd->vls_supported) - goto err; - (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); + smp->status |= IB_SMP_INVALID_FIELD; + else + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); } if (pip->mkey_violations == 0) @@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, ore = pip->localphyerrors_overrunerrors; if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) - goto err; + smp->status |= IB_SMP_INVALID_FIELD; if (set_overrunthreshold(ppd, (ore & 0xF))) - goto err; + smp->status |= IB_SMP_INVALID_FIELD; ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; @@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, state = pip->linkspeed_portstate & 0xF; lstate = (pip->portphysstate_linkdown >> 4) & 0xF; if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) - goto err; + smp->status |= IB_SMP_INVALID_FIELD; /* * Only state changes of DOWN, ARM, and ACTIVE are valid @@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, lstate = QIB_IB_LINKDOWN; else if (lstate == 3) lstate = QIB_IB_LINKDOWN_DISABLE; - else - goto err; + else { + smp->status |= IB_SMP_INVALID_FIELD; + break; + } spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_LINKV; spin_unlock_irqrestore(&ppd->lflags_lock, flags); @@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); break; default: - /* XXX We have already partially updated our state! 
*/ - goto err; + smp->status |= IB_SMP_INVALID_FIELD; } ret = subn_get_portinfo(smp, ibdev, port); diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 5f95f0f6385d..08944e2ee334 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -39,7 +39,6 @@ /* Fast memory region */ struct qib_fmr { struct ib_fmr ibfmr; - u8 page_shift; struct qib_mregion mr; /* must be last */ }; @@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) goto bail; } mr->mr.mapsz = m; + mr->mr.page_shift = 0; mr->mr.max_segs = count; /* @@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->mr.access_flags = mr_access_flags; mr->umem = umem; + if (is_power_of_2(umem->page_size)) + mr->mr.page_shift = ilog2(umem->page_size); m = 0; n = 0; list_for_each_entry(chunk, &umem->chunk_list, list) { @@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, fmr->mr.offset = 0; fmr->mr.access_flags = mr_access_flags; fmr->mr.max_segs = fmr_attr->max_pages; - fmr->page_shift = fmr_attr->page_shift; + fmr->mr.page_shift = fmr_attr->page_shift; atomic_set(&fmr->mr.refcount, 0); ret = &fmr->ibfmr; @@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, spin_lock_irqsave(&rkt->lock, flags); fmr->mr.user_base = iova; fmr->mr.iova = iova; - ps = 1 << fmr->page_shift; + ps = 1 << fmr->mr.page_shift; fmr->mr.length = list_len * ps; m = 0; n = 0; diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6c39851d2ded..e16751f8639e 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt, static inline unsigned find_next_offset(struct qib_qpn_table *qpt, struct qpn_map *map, unsigned off, - unsigned r) + unsigned n) { if (qpt->mask) { off++; - if ((off & qpt->mask) >> 1 != r) - off = ((off & qpt->mask) ? - (off | qpt->mask) + 1 : off) | (r << 1); + if (((off & qpt->mask) >> 1) >= n) + off = (off | qpt->mask) + 2; } else off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); return off; @@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, u32 i, offset, max_scan, qpn; struct qpn_map *map; u32 ret; - int r; if (type == IB_QPT_SMI || type == IB_QPT_GSI) { unsigned n; @@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, goto bail; } - r = smp_processor_id(); - if (r >= dd->n_krcv_queues) - r %= dd->n_krcv_queues; - qpn = qpt->last + 1; + qpn = qpt->last + 2; if (qpn >= QPN_MAX) qpn = 2; - if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) - qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | - (r << 1); + if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) + qpn = (qpn | qpt->mask) + 2; offset = qpn & BITS_PER_PAGE_MASK; map = &qpt->map[qpn / BITS_PER_PAGE]; max_scan = qpt->nmaps - !offset; @@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, ret = qpn; goto bail; } - offset = find_next_offset(qpt, map, offset, r); + offset = find_next_offset(qpt, map, offset, + dd->n_krcv_queues); qpn = mk_qpn(qpt, map, offset); /* * This test differs from alloc_pidmap(). @@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, if (qpt->nmaps == QPNMAP_ENTRIES) break; map = &qpt->map[qpt->nmaps++]; - offset = qpt->mask ? 
(r << 1) : 0; + offset = 0; } else if (map < &qpt->map[qpt->nmaps]) { ++map; - offset = qpt->mask ? (r << 1) : 0; + offset = 0; } else { map = &qpt->map[0]; - offset = qpt->mask ? (r << 1) : 2; + offset = 2; } qpn = mk_qpn(qpt, map, offset); } @@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); del_timer(&qp->s_timer); } + + if (qp->s_flags & QIB_S_ANY_WAIT_SEND) + qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; + spin_lock(&dev->pending_lock); if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { qp->s_flags &= ~QIB_S_ANY_WAIT_IO; @@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, } qp->ibqp.qp_num = err; qp->port_num = init_attr->port_num; - qp->processor_id = smp_processor_id(); qib_reset_qp(qp, init_attr->qp_type); break; diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index 35b3604b691d..3374a52232c1 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, goto bail; /* We see a module, but it may be unwise to look yet. Just schedule */ qd->t_insert = get_jiffies_64(); - schedule_work(&qd->work); + queue_work(ib_wq, &qd->work); bail: return; } @@ -493,10 +493,9 @@ bail: void qib_qsfp_deinit(struct qib_qsfp_data *qd) { /* - * There is nothing to do here for now. our - * work is scheduled with schedule_work(), and - * flush_scheduled_work() from remove_one will - * block until all work ssetup with schedule_work() + * There is nothing to do here for now. our work is scheduled + * with queue_work(), and flush_workqueue() from remove_one + * will block until all work setup with queue_work() * completes. */ } diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 955fb7157793..8245237b67ce 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_ctxtdata *rcd) { struct qib_swqe *wqe; + struct qib_pportdata *ppd = ppd_from_ibp(ibp); enum ib_wc_status status; unsigned long flags; int diff; @@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, u32 aeth; u64 val; + if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) { + /* + * If ACK'd PSN on SDMA busy list try to make progress to + * reclaim SDMA credits. + */ + if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && + (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) { + + /* + * If send tasklet not running attempt to progress + * SDMA queue. + */ + if (!(qp->s_flags & QIB_S_BUSY)) { + /* Acquire SDMA Lock */ + spin_lock_irqsave(&ppd->sdma_lock, flags); + /* Invoke sdma make progress */ + qib_sdma_make_progress(ppd); + /* Release SDMA Lock */ + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + } + } + } + spin_lock_irqsave(&qp->s_lock, flags); /* Ignore invalid responses. */ diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index e1b3da2a1f85..4a51fd1e9cb7 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qkey = be32_to_cpu(ohdr->u.ud.deth[0]); src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; - /* Get the number of bytes the message was padded by. */ + /* + * Get the number of bytes the message was padded by + * and drop incomplete packets. 
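The find_next_offset()/alloc_qpn() hunks above replace the per-CPU QPN striping with a simpler rule: if the context-select bits of a candidate QPN would name a receive context beyond n_krcv_queues, jump past the whole masked block. A sketch of that skip arithmetic; the mask value is illustrative.

/* Sketch of the QPN-candidate skip added in find_next_offset(). */
#include <stdio.h>

static unsigned next_candidate(unsigned qpn, unsigned mask, unsigned nqueues)
{
	if (mask && ((qpn & mask) >> 1) >= nqueues)
		qpn = (qpn | mask) + 2;	/* skip to just past this masked block */
	return qpn;
}

int main(void)
{
	unsigned mask = 0x3e;		/* assume QPN bits 5..1 pick the context */
	printf("%u\n", next_candidate(4,  mask, 4));	/* ctx 2 -> kept: 4 */
	printf("%u\n", next_candidate(10, mask, 4));	/* ctx 5 -> skipped to 64 */
	return 0;
}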
+ */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; - if (unlikely(tlen < (hdrsize + pad + 4))) { - /* Drop incomplete packets. */ - ibp->n_pkt_drops++; - goto bail; - } + if (unlikely(tlen < (hdrsize + pad + 4))) + goto drop; + tlen -= hdrsize + pad + 4; /* @@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, */ if (qp->ibqp.qp_num) { if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || - hdr->lrh[3] == IB_LID_PERMISSIVE)) { - ibp->n_pkt_drops++; - goto bail; - } + hdr->lrh[3] == IB_LID_PERMISSIVE)) + goto drop; if (qp->ibqp.qp_num > 1) { u16 pkey1, pkey2; @@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, 0xF, src_qp, qp->ibqp.qp_num, hdr->lrh[3], hdr->lrh[1]); - goto bail; + return; } } if (unlikely(qkey != qp->qkey)) { @@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, src_qp, qp->ibqp.qp_num, hdr->lrh[3], hdr->lrh[1]); - goto bail; + return; } /* Drop invalid MAD packets (see 13.5.3.1). */ if (unlikely(qp->ibqp.qp_num == 1 && (tlen != 256 || - (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { - ibp->n_pkt_drops++; - goto bail; - } + (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) + goto drop; } else { struct ib_smp *smp; /* Drop invalid MAD packets (see 13.5.3.1). */ - if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { - ibp->n_pkt_drops++; - goto bail; - } + if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) + goto drop; smp = (struct ib_smp *) data; if ((hdr->lrh[1] == IB_LID_PERMISSIVE || hdr->lrh[3] == IB_LID_PERMISSIVE) && - smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - ibp->n_pkt_drops++; - goto bail; - } + smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + goto drop; } /* @@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { wc.ex.imm_data = ohdr->u.ud.imm_data; wc.wc_flags = IB_WC_WITH_IMM; - hdrsize += sizeof(u32); + tlen -= sizeof(u32); } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; - } else { - ibp->n_pkt_drops++; - goto bail; - } + } else + goto drop; /* * A GRH is expected to preceed the data even if not @@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, /* Silently drop packets which are too big. 
*/ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= QIB_R_REUSE_SGE; - ibp->n_pkt_drops++; - return; + goto drop; } if (has_grh) { qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, @@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); -bail:; + return; + +drop: + ibp->n_pkt_drops++; } diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 4c19e06b5e85..66208bcd7c13 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev, kmem_cache_free(pq->pkt_slab, pkt); } + INIT_LIST_HEAD(list); } /* diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index bd57c1273225..95e5b47223b3 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -301,6 +301,7 @@ struct qib_mregion { int access_flags; u32 max_segs; /* number of qib_segs in all the arrays */ u32 mapsz; /* size of the map array */ + u8 page_shift; /* 0 - non unform/non powerof2 sizes */ atomic_t refcount; struct qib_segarray *map[0]; /* the segments */ }; @@ -435,7 +436,6 @@ struct qib_qp { spinlock_t r_lock; /* used for APM */ spinlock_t s_lock; atomic_t s_dma_busy; - unsigned processor_id; /* Processor ID QP is bound to */ u32 s_flags; u32 s_cur_size; /* size of send packet in bytes */ u32 s_len; /* total length of s_sge */ @@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp) !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); } -extern struct workqueue_struct *qib_wq; extern struct workqueue_struct *qib_cq_wq; /* @@ -813,13 +812,8 @@ extern struct workqueue_struct *qib_cq_wq; */ static inline void qib_schedule_send(struct qib_qp *qp) { - if (qib_send_ok(qp)) { - if (qp->processor_id == smp_processor_id()) - queue_work(qib_wq, &qp->s_work); - else - queue_work_on(qp->processor_id, - qib_wq, &qp->s_work); - } + if (qib_send_ok(qp)) + queue_work(ib_wq, &qp->s_work); } static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) |
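One small but easy-to-miss fix above is in qib_user_sdma_free_pkt_list(): after freeing every entry, the caller's list head is re-initialised so it is a valid empty list rather than a head whose pointers still reference freed packets. The driver uses the kernel's struct list_head and INIT_LIST_HEAD(); the userspace analogue below uses a plain singly linked list purely for illustration.

/* Userspace analogue of "free all entries, then re-init the head". */
#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; };

static void free_pkt_list(struct pkt **list)
{
	struct pkt *p = *list;
	while (p) {
		struct pkt *next = p->next;
		free(p);
		p = next;
	}
	*list = NULL;	/* analogue of INIT_LIST_HEAD(): leave the list empty */
}

int main(void)
{
	struct pkt *list = calloc(1, sizeof(*list));
	list->next = calloc(1, sizeof(*list));
	free_pkt_list(&list);
	printf("list %s\n", list ? "non-empty" : "empty");	/* "empty" */
	return 0;
}

Leaving the head re-initialised means a subsequent free or iteration over the same list is harmless instead of a use-after-free.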