author:    Julian Wiedmann <jwi@linux.ibm.com>  2021-01-30 16:56:20 +0300
committer: Heiko Carstens <hca@linux.ibm.com>  2021-03-22 13:36:05 +0300
commit:    396c100472dd63bb1a5389d9dfb25a94943c41c9
tree:      fa6cce781434347752192a274e80e899806e98c4
parent:    95b3a8b4014d82e79dc3ad03a1f8d6ee5f56b29d
s390/qdio: let driver manage the QAOB
We are spending way too much effort on qdio-internal bookkeeping for
QAOB management & caching, and it's still not robust. Once qdio's TX
path has detached the QAOB from a PENDING buffer, we lose all track of
it until it shows up in a CQ notification again. So if the device is
torn down before that notification arrives, we leak the QAOB.

Just have the driver take care of it, and simply pass down a QAOB if
they want a TX with async-completion capability. For a buffer in
PENDING state that requires the QAOB for final completion, qeth can now
also try to recycle the buffer's QAOB rather than unconditionally
freeing it.

This also eliminates the qdio_outbuf_state array, which was only needed
to transfer the aob->user1 tag from the driver to the qdio layer.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Acked-by: Benjamin Block <bblock@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
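Illustratively, the new contract looks like this from a driver's TX
path. This is a hedged sketch, not code from this patch: xmit_one_buf()
and its parameters are made up, while qdio_allocate_aob(),
qdio_release_aob(), do_QDIO() and QDIO_FLAG_SYNC_OUTPUT are the real
qdio interfaces touched here:

#include <asm/qdio.h>	/* struct qaob, qdio_allocate_aob(), do_QDIO() */

/* Hypothetical TX helper: queue one outbound buffer, with async
 * completion tracked by a driver-owned QAOB.
 */
static int xmit_one_buf(struct ccw_device *cdev, int q_nr, unsigned int bufnr)
{
	struct qaob *aob = qdio_allocate_aob();	/* NULL: TX without async completion */
	int rc;

	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, q_nr, bufnr, 1, aob);
	if (rc && aob)
		qdio_release_aob(aob);	/* TX failed, the QAOB stays with the driver */
	return rc;
}

On success the device may complete the buffer as PENDING, and the
driver then holds on to its QAOB until the CQ notification arrives.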
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--  drivers/s390/cio/qdio_main.c | 63
1 file changed, 8 insertions(+), 55 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 03a011619908..307ce7ff5ca4 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 	return 1;
 }
 
-static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
-						int bufnr)
-{
-	unsigned long phys_aob = 0;
-
-	if (!q->aobs[bufnr]) {
-		struct qaob *aob = qdio_allocate_aob();
-		q->aobs[bufnr] = aob;
-	}
-	if (q->aobs[bufnr]) {
-		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
-		phys_aob = virt_to_phys(q->aobs[bufnr]);
-		WARN_ON_ONCE(phys_aob & 0xFF);
-	}
-
-	return phys_aob;
-}
-
 static inline int qdio_tasklet_schedule(struct qdio_q *q)
 {
 	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
@@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 					unsigned int *error)
 {
 	unsigned char state = 0;
-	unsigned int i;
 	int count;
 
 	q->timestamp = get_tod_clock_fast();
@@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 
 	switch (state) {
 	case SLSB_P_OUTPUT_PENDING:
-		/* detach the utilized QAOBs: */
-		for (i = 0; i < count; i++)
-			q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
-
 		*error = QDIO_ERROR_SLSB_PENDING;
 		fallthrough;
 	case SLSB_P_OUTPUT_EMPTY:
@@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
 	cdev->private->qdio_data = NULL;
 	mutex_unlock(&irq_ptr->setup_mutex);
 
-	qdio_free_async_data(irq_ptr);
 	qdio_free_queues(irq_ptr);
 	free_page((unsigned long) irq_ptr->qdr);
 	free_page(irq_ptr->chsc_page);
@@ -1075,28 +1051,6 @@ err_dbf:
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
-static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
-{
-	struct qdio_q *q = irq_ptr->input_qs[0];
-	int i, use_cq = 0;
-
-	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
-		use_cq = 1;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		if (use_cq) {
-			if (multicast_outbound(q))
-				continue;
-			if (qdio_enable_async_operation(&q->u.out) < 0) {
-				use_cq = 0;
-				continue;
-			}
-		} else
-			qdio_disable_async_operation(&q->u.out);
-	}
-	DBF_EVENT("use_cq:%d", use_cq);
-}
-
 static void qdio_trace_init_data(struct qdio_irq *irq,
 				 struct qdio_initialize *data)
 {
@@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
 
 	qdio_setup_ssqd_info(irq_ptr);
 
-	qdio_detect_hsicq(irq_ptr);
-
 	/* qebsm is now setup if available, initialize buffer states */
 	qdio_init_buf_states(irq_ptr);
 
@@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
  * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are filled
+ * @aob: asynchronous operation block
  */
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   unsigned int bufnr, unsigned int count)
+			   unsigned int bufnr, unsigned int count,
+			   struct qaob *aob)
 {
 	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
@@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		q->u.out.pci_out_enabled = 0;
 
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
-		unsigned long phys_aob = 0;
-
-		if (q->u.out.use_cq && count == 1)
-			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
+		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
 		rc = qdio_kick_outbound_q(q, count, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
@@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
+ * @aob: asynchronous operation block (outbound only)
 */
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
-	    int q_nr, unsigned int bufnr, unsigned int count)
+	    int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
@@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 				      callflags, bufnr, count);
 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
 		return handle_outbound(irq_ptr->output_qs[q_nr],
-				       callflags, bufnr, count);
+				       callflags, bufnr, count, aob);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
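
For the completion side mentioned in the commit message (recycling a
PENDING buffer's QAOB rather than unconditionally freeing it), a hedged
sketch with made-up names; only struct qaob and qdio_release_aob() are
real qdio API, and this is not qeth's actual implementation:

#include <linux/types.h>
#include <asm/qdio.h>	/* struct qaob, qdio_release_aob() */

/* Hypothetical per-buffer TX state; only the QAOB pointer matters here. */
struct my_tx_buf {
	struct qaob *aob;	/* driver-owned, passed to do_QDIO() at TX time */
	bool reuse_aob;		/* policy: recycle rather than free */
};

/* Once the CQ reports final completion of a formerly-PENDING buffer,
 * the QAOB is back under driver control: keep it for the next
 * async-capable TX, or release it.
 */
static void my_tx_complete(struct my_tx_buf *buf)
{
	if (buf->reuse_aob)
		return;	/* the next do_QDIO() call passes buf->aob again */

	qdio_release_aob(buf->aob);
	buf->aob = NULL;
}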