Diffstat (limited to 'drivers/scsi/libfc/fc_exch.c')
-rw-r--r--	drivers/scsi/libfc/fc_exch.c	251
1 file changed, 161 insertions(+), 90 deletions(-)
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 587992952b3c..1b3a09473452 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/log2.h>
 #include <scsi/fc/fc_fc2.h>
@@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
                fr_eof(fp) = FC_EOF_N;
        }

-       /*
-        * Initialize remainig fh fields
-        * from fc_fill_fc_hdr
-        */
+       /* Initialize remaining fh fields from fc_fill_fc_hdr */
        fh->fh_ox_id = htons(ep->oxid);
        fh->fh_rx_id = htons(ep->rxid);
        fh->fh_seq_id = ep->seq.id;
@@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,

        FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

-       if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-                              msecs_to_jiffies(timer_msec)))
-               fc_exch_hold(ep);               /* hold for timer */
+       fc_exch_hold(ep);               /* hold for timer */
+       if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+                               msecs_to_jiffies(timer_msec)))
+               fc_exch_release(ep);
 }

 /**
@@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
 /**
  * fc_exch_done_locked() - Complete an exchange with the exchange lock held
  * @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static int fc_exch_done_locked(struct fc_exch *ep)
 {
@@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep)
         * ep, and in that case we only clear the resp and set it as
         * complete, so it can be reused by the timer to send the rrq.
         */
-       ep->resp = NULL;
        if (ep->state & FC_EX_DONE)
                return rc;
        ep->esb_stat |= ESB_ST_COMPLETE;
@@ -464,15 +464,21 @@ static void fc_exch_delete(struct fc_exch *ep)
 }

 static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
-                             struct fc_frame *fp)
+                             struct fc_frame *fp)
 {
        struct fc_exch *ep;
        struct fc_frame_header *fh = fc_frame_header_get(fp);
-       int error;
+       int error = -ENXIO;
        u32 f_ctl;
        u8 fh_type = fh->fh_type;

        ep = fc_seq_exch(sp);
+
+       if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+               fc_frame_free(fp);
+               goto out;
+       }
+
        WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

        f_ctl = ntoh24(fh->fh_f_ctl);
@@ -515,6 +521,9 @@ out:
  * @lport: The local port that the exchange will be sent on
  * @sp:    The sequence to be sent
  * @fp:    The frame to be sent on the exchange
+ *
+ * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+ * or indirectly by calling libfc_function_template.frame_send().
  */
 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
                       struct fc_frame *fp)
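The fc_exch_timer_set_locked() hunk above reverses the order of the timer reference and the queueing: the reference is taken first and dropped again only if queue_delayed_work() reports that the work was already pending, so there is no longer a window in which the queued work could run and release a reference that has not yet been taken. A minimal sketch of that pattern, with hypothetical my_obj, my_obj_free() and the system workqueue standing in for the exchange, fc_exch_hold()/fc_exch_release() and fc_exch_workqueue:

/*
 * Sketch only: "my_obj" and its helpers are hypothetical stand-ins for the
 * exchange and fc_exch_hold()/fc_exch_release(); they are not libfc code.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

struct my_obj {
        struct kref kref;
        struct delayed_work timeout_work;
};

static void my_obj_free(struct kref *kref)
{
        kfree(container_of(kref, struct my_obj, kref));
}

static void my_obj_arm_timer(struct my_obj *obj, unsigned int timer_msec)
{
        /*
         * Take the timer's reference first.  If the work was already
         * pending, queueing fails and the extra reference is dropped again.
         * Holding only after a successful queue (the old order) leaves a
         * window in which the work could run and put a reference that was
         * never taken.
         */
        kref_get(&obj->kref);
        if (!schedule_delayed_work(&obj->timeout_work,
                                   msecs_to_jiffies(timer_msec)))
                kref_put(&obj->kref, my_obj_free);
}

The same shape works for any refcounted object whose timeout handler drops the timer's reference when it finishes.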
@@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 /*
  * Set the response handler for the exchange associated with a sequence.
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_seq_set_resp(struct fc_seq *sp,
                             void (*resp)(struct fc_seq *,
                                          struct fc_frame *,
@@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp,
                             void *arg)
 {
        struct fc_exch *ep = fc_seq_exch(sp);
+       DEFINE_WAIT(wait);

        spin_lock_bh(&ep->ex_lock);
+       while (ep->resp_active && ep->resp_task != current) {
+               prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock_bh(&ep->ex_lock);
+
+               schedule();
+
+               spin_lock_bh(&ep->ex_lock);
+       }
+       finish_wait(&ep->resp_wq, &wait);
        ep->resp = resp;
        ep->arg = arg;
        spin_unlock_bh(&ep->ex_lock);
@@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
        if (!sp)
                return -ENOMEM;

-       ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);

-       /*
-        * If not logged into the fabric, don't send ABTS but leave
-        * sequence active until next timeout.
-        */
-       if (!ep->sid)
-               return 0;
-
-       /*
-        * Send an abort for the sequence that timed out.
-        */
-       fp = fc_frame_alloc(ep->lp, 0);
-       if (fp) {
-               fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
-                              FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-               error = fc_seq_send_locked(ep->lp, sp, fp);
-       } else
-               error = -ENOBUFS;
+       if (ep->sid) {
+               /*
+                * Send an abort for the sequence that timed out.
+                */
+               fp = fc_frame_alloc(ep->lp, 0);
+               if (fp) {
+                       ep->esb_stat |= ESB_ST_SEQ_INIT;
+                       fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+                                      FC_TYPE_BLS, FC_FC_END_SEQ |
+                                      FC_FC_SEQ_INIT, 0);
+                       error = fc_seq_send_locked(ep->lp, sp, fp);
+               } else {
+                       error = -ENOBUFS;
+               }
+       } else {
+               /*
+                * If not logged into the fabric, don't send ABTS but leave
+                * sequence active until next timeout.
+                */
+               error = 0;
+       }
+       ep->esb_stat |= ESB_ST_ABNORMAL;
        return error;
 }
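The fc_seq_set_resp() hunk above makes the function sleep until no response handler is executing (unless it is called from the handler's own task, tracked via ep->resp_task), so a handler can never observe a half-updated resp/arg pair. The loop is the usual prepare_to_wait()/unlock/schedule()/relock idiom for waiting on a condition protected by a spinlock; a self-contained sketch with a hypothetical struct foo instead of the exchange:

/*
 * Sketch only: "struct foo" and its members are hypothetical; only the
 * sleep-while-a-spinlocked-condition-holds idiom mirrors the patch.
 */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>

struct foo {
        spinlock_t lock;                /* protects handlers_running and state */
        unsigned int handlers_running;  /* callbacks currently executing */
        wait_queue_head_t idle_wq;      /* woken when handlers_running hits 0 */
        int state;
};

static void foo_set_state(struct foo *f, int new_state)
{
        DEFINE_WAIT(wait);

        spin_lock_bh(&f->lock);
        while (f->handlers_running) {
                /*
                 * Register on the wait queue before dropping the lock so a
                 * wake-up between the unlock and schedule() is not lost.
                 */
                prepare_to_wait(&f->idle_wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&f->lock);
                schedule();
                spin_lock_bh(&f->lock);
        }
        finish_wait(&f->idle_wq, &wait);
        f->state = new_state;   /* no running callback can see a torn update */
        spin_unlock_bh(&f->lock);
}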
@@ -669,6 +694,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
 }

 /**
+ * fc_invoke_resp() - invoke ep->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization finished (this means the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that none of these
+ * two variables changes if ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+                          struct fc_frame *fp)
+{
+       void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+       void *arg;
+       bool res = false;
+
+       spin_lock_bh(&ep->ex_lock);
+       ep->resp_active++;
+       if (ep->resp_task != current)
+               ep->resp_task = !ep->resp_task ? current : NULL;
+       resp = ep->resp;
+       arg = ep->arg;
+       spin_unlock_bh(&ep->ex_lock);
+
+       if (resp) {
+               resp(sp, fp, arg);
+               res = true;
+       } else if (!IS_ERR(fp)) {
+               fc_frame_free(fp);
+       }
+
+       spin_lock_bh(&ep->ex_lock);
+       if (--ep->resp_active == 0)
+               ep->resp_task = NULL;
+       spin_unlock_bh(&ep->ex_lock);
+
+       if (ep->resp_active == 0)
+               wake_up(&ep->resp_wq);
+
+       return res;
+}
+
+/**
  * fc_exch_timeout() - Handle exchange timer expiration
  * @work: The work_struct identifying the exchange that timed out
  */
@@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work)
 {
        struct fc_exch *ep = container_of(work, struct fc_exch,
                                          timeout_work.work);
        struct fc_seq *sp = &ep->seq;
-       void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-       void *arg;
        u32 e_stat;
        int rc = 1;
@@ -696,16 +774,13 @@
                fc_exch_rrq(ep);
                goto done;
        } else {
-               resp = ep->resp;
-               arg = ep->arg;
-               ep->resp = NULL;
                if (e_stat & ESB_ST_ABNORMAL)
                        rc = fc_exch_done_locked(ep);
                spin_unlock_bh(&ep->ex_lock);
                if (!rc)
                        fc_exch_delete(ep);
-               if (resp)
-                       resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+               fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+               fc_seq_set_resp(sp, NULL, ep->arg);
                fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
                goto done;
        }
@@ -792,6 +867,8 @@ hit:
        ep->f_ctl = FC_FC_FIRST_SEQ;    /* next seq is first seq */
        ep->rxid = FC_XID_UNKNOWN;
        ep->class = mp->class;
+       ep->resp_active = 0;
+       init_waitqueue_head(&ep->resp_wq);
        INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
 out:
        return ep;
@@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
                pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
                spin_lock_bh(&pool->lock);
                ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
-               if (ep && ep->xid == xid)
+               if (ep) {
+                       WARN_ON(ep->xid != xid);
                        fc_exch_hold(ep);
+               }
                spin_unlock_bh(&pool->lock);
        }
        return ep;
 }
@@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
  * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
  *                  the memory allocated for the related objects may be freed.
  * @sp: The sequence that has completed
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_exch_done(struct fc_seq *sp)
 {
@@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp)
        spin_lock_bh(&ep->ex_lock);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
+
+       fc_seq_set_resp(sp, NULL, ep->arg);
        if (!rc)
                fc_exch_delete(ep);
 }
@@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                }
        }

+       spin_lock_bh(&ep->ex_lock);
        /*
         * At this point, we have the exchange held.
         * Find or create the sequence.
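fc_invoke_resp(), added above, samples the handler pointer and argument under ex_lock, bumps an in-flight counter, calls the handler without the lock, and wakes resp_wq when the count drops back to zero, which is exactly what the sleeping fc_seq_set_resp() shown earlier waits for. A sketch of the same counter-plus-wait-queue shape, repeating the hypothetical struct foo from the previous sketch with the callback fields added; the ep->resp_task bookkeeping that lets a handler re-enter fc_seq_set_resp() from its own context is left out for brevity:

/*
 * Sketch only: hypothetical callback plumbing mirroring the shape of
 * fc_invoke_resp(); it is not the libfc API.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct foo {
        spinlock_t lock;
        unsigned int handlers_running;
        wait_queue_head_t idle_wq;
        int state;                              /* written by foo_set_state() */
        void (*cb)(void *data, void *arg);      /* set/cleared like ep->resp */
        void *cb_arg;
};

static bool foo_invoke_cb(struct foo *f, void *data)
{
        void (*cb)(void *data, void *arg);
        void *arg;
        bool called = false;

        spin_lock_bh(&f->lock);
        f->handlers_running++;          /* keeps foo_set_state() style updaters out */
        cb = f->cb;
        arg = f->cb_arg;
        spin_unlock_bh(&f->lock);

        if (cb) {
                cb(data, arg);          /* never invoked with the lock held */
                called = true;
        }

        spin_lock_bh(&f->lock);
        f->handlers_running--;
        spin_unlock_bh(&f->lock);

        if (!f->handlers_running)       /* waiters re-check under the lock */
                wake_up(&f->idle_wq);

        return called;
}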
@@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                         * sending RSP, hence write request on other
                         * end never finishes.
                         */
-                       spin_lock_bh(&ep->ex_lock);
                        sp->ssb_stat |= SSB_ST_RESP;
                        sp->id = fh->fh_seq_id;
-                       spin_unlock_bh(&ep->ex_lock);
                } else {
+                       spin_unlock_bh(&ep->ex_lock);
+
                        /* sequence/exch should exist */
                        reject = FC_RJT_SEQ_ID;
                        goto rel;
@@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,

        if (f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat |= ESB_ST_SEQ_INIT;
+       spin_unlock_bh(&ep->ex_lock);

        fr_seq(fp) = sp;
 out:
@@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
        if (!ep)
                goto reject;
+
+       fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+       if (!fp)
+               goto free;
+
        spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & ESB_ST_COMPLETE) {
                spin_unlock_bh(&ep->ex_lock);
+
+               fc_frame_free(fp);
                goto reject;
        }
-       if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+       if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+               ep->esb_stat |= ESB_ST_REC_QUAL;
                fc_exch_hold(ep);               /* hold for REC_QUAL */
-       ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
-       fc_exch_timer_set_locked(ep, ep->r_a_tov);
-
-       fp = fc_frame_alloc(ep->lp, sizeof(*ap));
-       if (!fp) {
-               spin_unlock_bh(&ep->ex_lock);
-               goto free;
        }
+       fc_exch_timer_set_locked(ep, ep->r_a_tov);
        fh = fc_frame_header_get(fp);
        ap = fc_frame_payload_get(fp, sizeof(*ap));
        memset(ap, 0, sizeof(*ap));
@@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
        }
        sp = fc_seq_start_next_locked(sp);
        fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+       ep->esb_stat |= ESB_ST_ABNORMAL;
        spin_unlock_bh(&ep->ex_lock);
+
+free:
        fc_frame_free(rx_fp);
        return;

 reject:
        fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
-free:
-       fc_frame_free(rx_fp);
+       goto free;
 }
@@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
                 * If new exch resp handler is valid then call that
                 * first.
                 */
-               if (ep->resp)
-                       ep->resp(sp, fp, ep->arg);
-               else
+               if (!fc_invoke_resp(ep, sp, fp))
                        lport->tt.lport_recv(lport, fp);
                fc_exch_release(ep);    /* release from lookup */
        } else {
@@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
        struct fc_exch *ep;
        enum fc_sof sof;
        u32 f_ctl;
-       void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-       void *ex_resp_arg;
        int rc;

        ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
@@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)

        f_ctl = ntoh24(fh->fh_f_ctl);
        fr_seq(fp) = sp;
+
+       spin_lock_bh(&ep->ex_lock);
        if (f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat |= ESB_ST_SEQ_INIT;
+       spin_unlock_bh(&ep->ex_lock);

        if (fc_sof_needs_ack(sof))
                fc_seq_send_ack(sp, fp);
-       resp = ep->resp;
-       ex_resp_arg = ep->arg;

        if (fh->fh_type != FC_TYPE_FCP &&
            fr_eof(fp) == FC_EOF_T &&
            (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
            (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
                spin_lock_bh(&ep->ex_lock);
-               resp = ep->resp;
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
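The fc_exch_recv_abts() hunks above move the fc_frame_alloc() call in front of spin_lock_bh(), so an allocation failure no longer has to unlock and roll back state from inside the critical section, and the reject path shrinks to a plain free. A generic sketch of that allocate-before-locking shape; struct reply_ctx, the sk_buff payload and the "closed" flag are only illustrative, not libfc code:

/*
 * Sketch only: alloc_skb() stands in for fc_frame_alloc() and "closed" for
 * the ESB_ST_COMPLETE check.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct reply_ctx {
        spinlock_t lock;        /* protects closed and the queued reply */
        bool closed;
};

static int reply_ctx_send(struct reply_ctx *ctx)
{
        struct sk_buff *skb;

        /*
         * Allocate before locking: the failure path never runs with the
         * lock held and no flags or timers need to be rolled back.
         */
        skb = alloc_skb(256, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        spin_lock_bh(&ctx->lock);
        if (ctx->closed) {
                spin_unlock_bh(&ctx->lock);
                kfree_skb(skb); /* only a plain free on the bail-out path */
                return -ESHUTDOWN;
        }
        /* ... fill in and hand off skb while the state is known-stable ... */
        spin_unlock_bh(&ctx->lock);
        return 0;
}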
@@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       if (resp)
-               resp(sp, fp, ex_resp_arg);
-       else
-               fc_frame_free(fp);
+       fc_invoke_resp(ep, sp, fp);
+
        fc_exch_release(ep);
        return;
 rel:
@@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
  */
 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 {
-       void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-       void *ex_resp_arg;
        struct fc_frame_header *fh;
        struct fc_ba_acc *ap;
        struct fc_seq *sp;
@@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
                break;
        }

-       resp = ep->resp;
-       ex_resp_arg = ep->arg;
-
        /* do we need to do some other checks here. Can we reuse more of
         * fc_exch_recv_seq_resp
         */
@@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
            ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
                rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
+
+       fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-
-       if (resp)
-               resp(sp, fp, ex_resp_arg);
-       else
-               fc_frame_free(fp);
-
+       fc_invoke_resp(ep, sp, fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
-
+       fc_exch_release(ep);
 }
@@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
                break;
        default:
                if (ep)
-                       FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+                       FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
                                    fh->fh_r_ctl,
                                    fc_exch_rctl_name(fh->fh_r_ctl));
                break;
@@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
 /**
  * fc_exch_reset() - Reset an exchange
  * @ep: The exchange to be reset
+ *
+ * Note: May sleep if invoked from outside a response handler.
  */
 static void fc_exch_reset(struct fc_exch *ep)
 {
        struct fc_seq *sp;
-       void (*resp)(struct fc_seq *, struct fc_frame *, void *);
-       void *arg;
        int rc = 1;

        spin_lock_bh(&ep->ex_lock);
        fc_exch_abort_locked(ep, 0);
        ep->state |= FC_EX_RST_CLEANUP;
        fc_exch_timer_cancel(ep);
-       resp = ep->resp;
-       ep->resp = NULL;
        if (ep->esb_stat & ESB_ST_REC_QUAL)
                atomic_dec(&ep->ex_refcnt);     /* drop hold for rec_qual */
        ep->esb_stat &= ~ESB_ST_REC_QUAL;
-       arg = ep->arg;
        sp = &ep->seq;
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
+
+       fc_exch_hold(ep);
+
        if (!rc)
                fc_exch_delete(ep);
-       if (resp)
-               resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+       fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+       fc_seq_set_resp(sp, NULL, ep->arg);
+       fc_exch_release(ep);
 }
@@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)

        switch (op) {
        case ELS_LS_RJT:
-               FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
+               FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
                /* fall through */
        case ELS_LS_ACC:
                goto cleanup;
        default:
-               FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
-                           "for RRQ", op);
+               FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+                           op);
                return;
        }
@@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void)
         * cpu on which exchange originated by simple bitwise
         * AND operation between fc_cpu_mask and exchange id.
         */
-       fc_cpu_mask = 1;
-       fc_cpu_order = 0;
-       while (fc_cpu_mask < nr_cpu_ids) {
-               fc_cpu_mask <<= 1;
-               fc_cpu_order++;
-       }
-       fc_cpu_mask--;
+       fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+       fc_cpu_mask = (1 << fc_cpu_order) - 1;

        fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
        if (!fc_exch_workqueue)
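The fc_setup_exch_mgr() hunk replaces the hand-rolled doubling loop with fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)) and derives fc_cpu_mask from the order. Both forms yield the smallest order such that (1 << order) covers nr_cpu_ids, so the XID-to-CPU mapping is unchanged. A small userspace check of that equivalence (not kernel code; __builtin_clz() emulates the ilog2/roundup_pow_of_two pair):

/*
 * Standalone check, compile with: cc -O2 -o mask_check mask_check.c
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (unsigned int nr_cpu_ids = 1; nr_cpu_ids <= 4096; nr_cpu_ids++) {
                /* Old code: widen a power of two until it covers nr_cpu_ids. */
                unsigned int old_mask = 1, old_order = 0;

                while (old_mask < nr_cpu_ids) {
                        old_mask <<= 1;
                        old_order++;
                }
                old_mask--;

                /*
                 * New code: ilog2(roundup_pow_of_two(n)) is the smallest
                 * order with (1 << order) >= n, for n >= 1.
                 */
                unsigned int new_order = nr_cpu_ids == 1 ?
                        0 : 32 - __builtin_clz(nr_cpu_ids - 1);
                unsigned int new_mask = (1U << new_order) - 1;

                assert(old_order == new_order && old_mask == new_mask);
        }
        printf("fc_cpu_order/fc_cpu_mask: old and new computations agree\n");
        return 0;
}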