Diffstat (limited to 'drivers/infiniband/hw/qib/qib_keys.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_keys.c | 186
1 file changed, 16 insertions(+), 170 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index d725c565518d..2c3c93572c17 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -46,20 +46,20 @@
  *
  */
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
 {
 	unsigned long flags;
 	u32 r;
 	u32 n;
 	int ret = 0;
 	struct qib_ibdev *dev = to_idev(mr->pd->device);
-	struct qib_lkey_table *rkt = &dev->lk_table;
+	struct rvt_lkey_table *rkt = &dev->lk_table;
 
 	spin_lock_irqsave(&rkt->lock, flags);
 
 	/* special case for dma_mr lkey == 0 */
 	if (dma_region) {
-		struct qib_mregion *tmr;
+		struct rvt_mregion *tmr;
 
 		tmr = rcu_access_pointer(dev->dma_mr);
 		if (!tmr) {
@@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 	 * bits are capped in qib_verbs.c to insure enough bits
 	 * for generation number
 	 */
-	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
-		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+	mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
+		((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
 		 << 8);
 	if (mr->lkey == 0) {
 		mr->lkey |= 1 << 8;
@@ -114,13 +114,13 @@ bail:
  * qib_free_lkey - free an lkey
  * @mr: mr to free from tables
  */
-void qib_free_lkey(struct qib_mregion *mr)
+void qib_free_lkey(struct rvt_mregion *mr)
 {
 	unsigned long flags;
 	u32 lkey = mr->lkey;
 	u32 r;
 	struct qib_ibdev *dev = to_idev(mr->pd->device);
-	struct qib_lkey_table *rkt = &dev->lk_table;
+	struct rvt_lkey_table *rkt = &dev->lk_table;
 
 	spin_lock_irqsave(&rkt->lock, flags);
 	if (!mr->lkey_published)
@@ -128,7 +128,7 @@ void qib_free_lkey(struct qib_mregion *mr)
 	if (lkey == 0)
 		RCU_INIT_POINTER(dev->dma_mr, NULL);
 	else {
-		r = lkey >> (32 - ib_qib_lkey_table_size);
+		r = lkey >> (32 - ib_rvt_lkey_table_size);
 		RCU_INIT_POINTER(rkt->table[r], NULL);
 	}
 	qib_put_mr(mr);
@@ -138,105 +138,6 @@ out:
 }
 
 /**
- * qib_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
-		struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
-	struct qib_mregion *mr;
-	unsigned n, m;
-	size_t off;
-
-	/*
-	 * We use LKEY == zero for kernel virtual addresses
-	 * (see qib_get_dma_mr and qib_dma.c).
-	 */
-	rcu_read_lock();
-	if (sge->lkey == 0) {
-		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
-		if (pd->user)
-			goto bail;
-		mr = rcu_dereference(dev->dma_mr);
-		if (!mr)
-			goto bail;
-		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-			goto bail;
-		rcu_read_unlock();
-
-		isge->mr = mr;
-		isge->vaddr = (void *) sge->addr;
-		isge->length = sge->length;
-		isge->sge_length = sge->length;
-		isge->m = 0;
-		isge->n = 0;
-		goto ok;
-	}
-	mr = rcu_dereference(
-		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
-	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
-		goto bail;
-
-	off = sge->addr - mr->user_base;
-	if (unlikely(sge->addr < mr->user_base ||
-		     off + sge->length > mr->length ||
-		     (mr->access_flags & acc) != acc))
-		goto bail;
-	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-		goto bail;
-	rcu_read_unlock();
-
-	off += mr->offset;
-	if (mr->page_shift) {
-		/*
-		page sizes are uniform power of 2 so no loop is necessary
-		entries_spanned_by_off is the number of times the loop below
-		would have executed.
-		*/
-		size_t entries_spanned_by_off;
-
-		entries_spanned_by_off = off >> mr->page_shift;
-		off -= (entries_spanned_by_off << mr->page_shift);
-		m = entries_spanned_by_off/QIB_SEGSZ;
-		n = entries_spanned_by_off%QIB_SEGSZ;
-	} else {
-		m = 0;
-		n = 0;
-		while (off >= mr->map[m]->segs[n].length) {
-			off -= mr->map[m]->segs[n].length;
-			n++;
-			if (n >= QIB_SEGSZ) {
-				m++;
-				n = 0;
-			}
-		}
-	}
-	isge->mr = mr;
-	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-	isge->length = mr->map[m]->segs[n].length - off;
-	isge->sge_length = sge->length;
-	isge->m = m;
-	isge->n = n;
-ok:
-	return 1;
-bail:
-	rcu_read_unlock();
-	return 0;
-}
-
-/**
  * qib_rkey_ok - check the IB virtual address, length, and RKEY
  * @qp: qp for validation
  * @sge: SGE state
@@ -249,11 +150,11 @@ bail:
  *
  * increments the reference count upon success
  */
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 		u32 len, u64 vaddr, u32 rkey, int acc)
 {
-	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct qib_mregion *mr;
+	struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+	struct rvt_mregion *mr;
 	unsigned n, m;
 	size_t off;
 
@@ -263,7 +164,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	 */
 	rcu_read_lock();
 	if (rkey == 0) {
-		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
 		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
 
 		if (pd->user)
@@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	}
 
 	mr = rcu_dereference(
-		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+		rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
 	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
 
@@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 
 		entries_spanned_by_off = off >> mr->page_shift;
 		off -= (entries_spanned_by_off << mr->page_shift);
-		m = entries_spanned_by_off/QIB_SEGSZ;
-		n = entries_spanned_by_off%QIB_SEGSZ;
+		m = entries_spanned_by_off / RVT_SEGSZ;
+		n = entries_spanned_by_off % RVT_SEGSZ;
 	} else {
 		m = 0;
 		n = 0;
 		while (off >= mr->map[m]->segs[n].length) {
 			off -= mr->map[m]->segs[n].length;
 			n++;
-			if (n >= QIB_SEGSZ) {
+			if (n >= RVT_SEGSZ) {
 				m++;
 				n = 0;
 			}
@@ -335,58 +236,3 @@ bail:
 	return 0;
 }
 
-/*
- * Initialize the memory region specified by the work request.
- */
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
-{
-	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
-	struct qib_mr *mr = to_imr(wr->mr);
-	struct qib_mregion *mrg;
-	u32 key = wr->key;
-	unsigned i, n, m;
-	int ret = -EINVAL;
-	unsigned long flags;
-	u64 *page_list;
-	size_t ps;
-
-	spin_lock_irqsave(&rkt->lock, flags);
-	if (pd->user || key == 0)
-		goto bail;
-
-	mrg = rcu_dereference_protected(
-		rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
-		lockdep_is_held(&rkt->lock));
-	if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
-		goto bail;
-
-	if (mr->npages > mrg->max_segs)
-		goto bail;
-
-	ps = mr->ibmr.page_size;
-	if (mr->ibmr.length > ps * mr->npages)
-		goto bail;
-
-	mrg->user_base = mr->ibmr.iova;
-	mrg->iova = mr->ibmr.iova;
-	mrg->lkey = key;
-	mrg->length = mr->ibmr.length;
-	mrg->access_flags = wr->access;
-	page_list = mr->pages;
-	m = 0;
-	n = 0;
-	for (i = 0; i < mr->npages; i++) {
-		mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
-		mrg->map[m]->segs[n].length = ps;
-		if (++n == QIB_SEGSZ) {
-			m++;
-			n = 0;
-		}
-	}
-
-	ret = 0;
-bail:
-	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
-}
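For readers following the lkey arithmetic in qib_alloc_lkey() above: the table index occupies the top ib_rvt_lkey_table_size bits of the key, and a generation counter sits just above bit 8, so a stale key whose table slot has been reused still fails the mr->lkey != rkey check in qib_rkey_ok(). A minimal user-space sketch of that encoding, assuming a fixed 16-bit table size for illustration (the real value is a module parameter capped in qib_verbs.c; the helper names here are made up):

#include <stdio.h>
#include <stdint.h>

#define LKEY_TABLE_SIZE 16   /* illustrative stand-in for ib_rvt_lkey_table_size */

/* Pack a table index and generation counter, mirroring the patched code. */
static uint32_t make_lkey(uint32_t r, uint32_t gen)
{
	uint32_t lkey = (r << (32 - LKEY_TABLE_SIZE)) |
		((((1u << (24 - LKEY_TABLE_SIZE)) - 1) & gen) << 8);

	/* lkey == 0 is reserved for the DMA MR, so force a nonzero key. */
	if (lkey == 0)
		lkey |= 1 << 8;
	return lkey;
}

/* Recover the table index, as qib_free_lkey() does. */
static uint32_t lkey_to_index(uint32_t lkey)
{
	return lkey >> (32 - LKEY_TABLE_SIZE);
}

int main(void)
{
	uint32_t lkey = make_lkey(5, 3);

	printf("lkey=0x%08x index=%u\n", lkey, lkey_to_index(lkey));
	return 0;
}

With a 16-bit table the generation field is 8 bits wide, so a slot must be recycled 256 times before an old key can collide with a live one.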
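The other piece worth unpacking is entries_spanned_by_off in qib_rkey_ok(): when every segment has the same power-of-2 length, a single shift and divide replace the segment-by-segment walk. The sketch below (illustrative only, not kernel code; SEGSZ is shrunk from the real RVT_SEGSZ to keep the demo small) checks that the fast path and the generic loop land on the same (m, n, off):

#include <stdio.h>
#include <stddef.h>

#define SEGSZ 4   /* stand-in for RVT_SEGSZ, shrunk for the demo */

int main(void)
{
	const unsigned page_shift = 12;           /* uniform 4 KiB segments */
	const size_t seg_len = 1ul << page_shift;
	size_t off = 5 * seg_len + 123;           /* offset into the region */

	/* Fast path: how many whole segments does off span? */
	size_t spanned = off >> page_shift;
	size_t fast_off = off - (spanned << page_shift);
	unsigned fast_m = spanned / SEGSZ;        /* which map chunk */
	unsigned fast_n = spanned % SEGSZ;        /* which segment in the chunk */

	/* Generic loop, needed when segment lengths are not uniform. */
	size_t loop_off = off;
	unsigned m = 0, n = 0;
	while (loop_off >= seg_len) {
		loop_off -= seg_len;
		if (++n >= SEGSZ) {
			m++;
			n = 0;
		}
	}

	printf("fast: m=%u n=%u off=%zu\n", fast_m, fast_n, fast_off);
	printf("loop: m=%u n=%u off=%zu\n", m, n, loop_off);
	return 0;
}

Both print m=1 n=1 off=123, which is exactly the equivalence the deleted in-function comment ("entries_spanned_by_off is the number of times the loop below would have executed") was asserting.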