author     Ursula Braun <ursula.braun@de.ibm.com>        2016-08-05 13:33:10 +0300
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2016-08-08 17:01:28 +0300
commit     9bce8b2cbebf9f11b41021ccb98c6b18d1796edd
tree       e688298aae0aa41d08fe908b9a9c86c7948e1dda
parent     6e30c549f6cab7a41d0934cea80822a2211132ae
download   linux-9bce8b2cbebf9f11b41021ccb98c6b18d1796edd.tar.xz
s390/qdio: avoid reschedule of outbound tasklet once killed
During qdio_shutdown the queue tasklets are killed for all inbound and
outbound queues. The queue structures might be freed after
qdio_shutdown, so it must be guaranteed that these queue tasklets are
not rescheduled after that. In addition, the outbound queue timers are
deleted, and it must be guaranteed that these timers are not restarted
after qdio_shutdown processing. Timer deletion should use
del_timer_sync() to make sure qdio_outbound_timer() has finished on
other CPUs as well. Queue tasklets should be scheduled in state
QDIO_IRQ_STATE_ACTIVE only.
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 drivers/s390/cio/qdio_main.c | 49
 1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index fda3b2617ec9..71bf9bded485 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
 	q->qdio_error = 0;
 }
 
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+		tasklet_schedule(&q->tasklet);
+		return 0;
+	}
+	return -EPERM;
+}
+
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_inbound);
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
 	if (!qdio_inbound_q_done(q)) {
 		/* means poll time is not yet over */
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
@@ -711,8 +718,7 @@
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
 	 * is noticed and outbound_handler is called after some time.
 	 */
 	if (qdio_outbound_q_done(q))
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 	else
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
 	return;
 
 sched:
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 /* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
 
-	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
-		return;
-	tasklet_schedule(&q->tasklet);
+	qdio_tasklet_schedule(q);
 }
 
 static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 	for_each_output_queue(q->irq_ptr, out, i)
 		if (!qdio_outbound_q_done(out))
-			tasklet_schedule(&out->tasklet);
+			qdio_tasklet_schedule(out);
 }
 
 static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
-			tasklet_schedule(&q->tasklet);
+		if (!qdio_tasklet_schedule(q))
 			return;
-		}
 	}
 
 	qdio_stop_polling(q);
@@ -942,8 +943,7 @@
 	 */
 	if (!qdio_inbound_q_done(q)) {
 		qperf_inc(q, tasklet_inbound_resched2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	int i;
 	struct qdio_q *q;
 
-	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
 	for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			continue;
 		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
 			qdio_siga_sync_q(q);
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	}
 }
@@ -1145,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
 		tasklet_kill(&q->tasklet);
 
 	for_each_output_queue(irq_ptr, q, i) {
-		del_timer(&q->u.out.timer);
+		del_timer_sync(&q->u.out.timer);
 		tasklet_kill(&q->tasklet);
 	}
 }
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	/* in case of SIGA errors we must process the error immediately */
 	if (used >= q->u.out.scan_threshold || rc)
-		tasklet_schedule(&q->tasklet);
+		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
-		if (!timer_pending(&q->u.out.timer))
+		if (!timer_pending(&q->u.out.timer) &&
+		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
 			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }
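The patch boils down to two guarantees: once shutdown has begun, nothing re-arms the
tasklets or the outbound timer (qdio_tasklet_schedule() and the new ACTIVE checks), and
shutdown waits for callbacks already running on another CPU (tasklet_kill(),
del_timer_sync()) before the queue structures may be freed. As a purely illustrative
aid, the userspace sketch below models that pattern with pthreads and an atomic state
flag; every name in it is invented for the example and is not part of the qdio driver.

/*
 * Illustrative userspace analogue (not kernel code): a self-rearming
 * worker only keeps running while the state is ACTIVE, and shutdown
 * flips the state first and then waits for the in-flight run before
 * the structure may be freed.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum queue_state { ACTIVE, STOPPED };

struct queue {
	atomic_int state;
	pthread_t worker;
	int runs;
};

/* analogue of qdio_tasklet_schedule(): refuse to re-arm once not ACTIVE */
static int queue_may_run(struct queue *q)
{
	if (atomic_load(&q->state) == ACTIVE)
		return 0;
	return -1;		/* shutdown in progress, do not reschedule */
}

static void *worker_fn(void *arg)
{
	struct queue *q = arg;

	/* self-rearming loop: each iteration re-checks the gate */
	while (queue_may_run(q) == 0) {
		q->runs++;
		usleep(1000);	/* pretend to process buffers */
	}
	return NULL;
}

int main(void)
{
	struct queue q = { .state = ACTIVE };

	pthread_create(&q.worker, NULL, worker_fn, &q);
	usleep(50 * 1000);

	/* shutdown: block further re-arming first ... */
	atomic_store(&q.state, STOPPED);
	/*
	 * ... then wait for the run already in progress (the role played
	 * by tasklet_kill()/del_timer_sync() in the driver), so freeing
	 * the queue afterwards is safe.
	 */
	pthread_join(q.worker, NULL);

	printf("worker ran %d times; queue can be freed now\n", q.runs);
	return 0;
}

Compile with cc -pthread. The pthread_join() step corresponds to the synchronous wait
that tasklet_kill() and del_timer_sync() perform in qdio_shutdown_queues(); the plain
del_timer() the patch replaces would not provide that wait on SMP systems.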