Diffstat (limited to 'drivers/net/ethernet/cavium/liquidio/request_manager.c')
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/request_manager.c | 125
1 file changed, 87 insertions, 38 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8f746e1348d4..c6f4cbda040f 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	iq->do_auto_flush = 1;
 	iq->db_timeout = (u32)conf->db_timeout;
 	atomic_set(&iq->instr_pending, 0);
+	iq->pkts_processed = 0;
 
 	/* Initialize the spinlock for this instruction queue */
 	spin_lock_init(&iq->lock);
@@ -379,7 +380,6 @@ lio_process_iq_request_list(struct octeon_device *oct,
 	u32 inst_count = 0;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct octeon_soft_command *sc;
-	struct octeon_instr_irh *irh;
 	unsigned long flags;
 
 	while (old != iq->octeon_read_index) {
@@ -401,40 +401,21 @@ lio_process_iq_request_list(struct octeon_device *oct,
 		case REQTYPE_RESP_NET:
 		case REQTYPE_SOFT_COMMAND:
 			sc = buf;
-
-			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd3.irh;
-			else
-				irh = (struct octeon_instr_irh *)
-					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
-					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
+				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
+					       [OCTEON_ORDERED_SC_LIST].lock,
+					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
@@ -459,7 +440,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
 
 	if (atomic_read(&oct->response_list
 			[OCTEON_ORDERED_SC_LIST].pending_req_count))
-		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+		queue_work(cwq->wq, &cwq->wk.work.work);
 
 	return inst_count;
 }
@@ -495,6 +476,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
 				lio_process_iq_request_list(oct, iq, 0);
 
 		if (inst_processed) {
+			iq->pkts_processed += inst_processed;
 			atomic_sub(inst_processed, &iq->instr_pending);
 			iq->stats.instr_processed += inst_processed;
 		}
@@ -753,8 +735,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
@@ -789,11 +770,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 	return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node,
+					      &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
 
+	octeon_free_sc_zombie_list(oct);
+
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
 	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -822,6 +868,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE);
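
Taken together, the changes above rework the soft-command lifetime: every command now goes on the ordered list unconditionally, completions are reaped from the done list once the caller sets caller_is_done, and commands whose status word still holds COMPLETION_WORD_INIT (i.e., the device never responded) are parked on a zombie list rather than freed, apparently so their buffers are not recycled while the device might still write to them; the zombie list is drained before the buffer pool is torn down. The standalone C sketch below models that two-list reaping. The struct layout, singly linked lists, and main() driver are simplified stand-ins for illustration, not the driver's actual types; only the done/zombie decision mirrors octeon_free_sc_done_list() and octeon_free_sc_zombie_list() above.

/* Hypothetical userspace model of the done/zombie soft-command
 * bookkeeping this patch introduces. Build with: cc -std=c99 sc_model.c
 */
#include <stdio.h>
#include <stdlib.h>

/* Matches the driver's "status word never overwritten" sentinel. */
#define COMPLETION_WORD_INIT 0xffffffffffffffffULL

struct soft_command {            /* simplified stand-in type */
	struct soft_command *next;
	int caller_is_done;              /* caller no longer needs sc */
	unsigned long long status_word;  /* written by the device */
	int id;
};

/* Free completed commands whose caller is done with them; park
 * timed-out ones on the zombie list instead of freeing, mirroring
 * octeon_free_sc_done_list().
 */
static void reap_done_list(struct soft_command **done,
			   struct soft_command **zombie)
{
	struct soft_command **pp = done;

	while (*pp) {
		struct soft_command *sc = *pp;

		if (!sc->caller_is_done) {
			pp = &sc->next;   /* caller still owns it; skip */
			continue;
		}
		*pp = sc->next;           /* unlink from done list */
		if (sc->status_word == COMPLETION_WORD_INIT) {
			/* no response ever arrived: zombie it */
			sc->next = *zombie;
			*zombie = sc;
			printf("sc %d -> zombie list\n", sc->id);
		} else {
			printf("sc %d freed (status 0x%llx)\n",
			       sc->id, sc->status_word);
			free(sc);
		}
	}
}

/* Drain the zombie list at teardown, mirroring
 * octeon_free_sc_zombie_list().
 */
static void free_zombie_list(struct soft_command **zombie)
{
	while (*zombie) {
		struct soft_command *sc = *zombie;

		*zombie = sc->next;
		printf("zombie sc %d freed\n", sc->id);
		free(sc);
	}
}

int main(void)
{
	struct soft_command *done = NULL, *zombie = NULL;

	for (int i = 0; i < 3; i++) {
		struct soft_command *sc = calloc(1, sizeof(*sc));

		if (!sc)
			return 1;
		sc->id = i;
		sc->caller_is_done = 1;
		/* command 1 "times out": status word never overwritten */
		sc->status_word = (i == 1) ? COMPLETION_WORD_INIT : 0;
		sc->next = done;
		done = sc;
	}

	reap_done_list(&done, &zombie);
	free_zombie_list(&zombie);
	return 0;
}

Running the sketch, command 1 lands on the zombie list while the others are freed immediately; in the driver, the same split is what lets octeon_free_sc_buffer_pool() call octeon_free_sc_zombie_list() before releasing the pool.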