Diffstat (limited to 'net/rxrpc/call_event.c')
-rw-r--r--   net/rxrpc/call_event.c   171
1 file changed, 70 insertions(+), 101 deletions(-)
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index c9f835292f7b..9db62fa55c62 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -74,11 +74,6 @@ void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
 
 	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
 		return;
-	if (ack_reason == RXRPC_ACK_DELAY &&
-	    test_and_set_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags)) {
-		trace_rxrpc_drop_ack(call, why, ack_reason, serial, false);
-		return;
-	}
 
 	rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
 
@@ -111,12 +106,7 @@ void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
 	spin_unlock_bh(&local->ack_tx_lock);
 	trace_rxrpc_send_ack(call, why, ack_reason, serial);
 
-	if (!rcu_read_lock_held()) {
-		rxrpc_transmit_ack_packets(call->peer->local);
-	} else {
-		rxrpc_get_local(local, rxrpc_local_get_queue);
-		rxrpc_queue_local(local);
-	}
+	rxrpc_wake_up_io_thread(local);
 }
 
 /*
@@ -130,11 +120,10 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
 {
 	struct rxrpc_ackpacket *ack = NULL;
 	struct rxrpc_txbuf *txb;
-	struct sk_buff *ack_skb = NULL;
 	unsigned long resend_at;
 	rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
 	ktime_t now, max_age, oldest, ack_ts;
@@ -148,32 +137,21 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
 	oldest = now;
 
-	/* See if there's an ACK saved with a soft-ACK table in it. */
-	if (call->acks_soft_tbl) {
-		spin_lock_bh(&call->acks_ack_lock);
-		ack_skb = call->acks_soft_tbl;
-		if (ack_skb) {
-			rxrpc_get_skb(ack_skb, rxrpc_skb_get_ack);
-			ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
-		}
-		spin_unlock_bh(&call->acks_ack_lock);
-	}
-
 	if (list_empty(&call->tx_buffer))
 		goto no_resend;
 
-	spin_lock(&call->tx_lock);
-
 	if (list_empty(&call->tx_buffer))
 		goto no_further_resend;
 
-	trace_rxrpc_resend(call);
+	trace_rxrpc_resend(call, ack_skb);
 	txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
 
 	/* Scan the soft ACK table without dropping the lock and resend any
 	 * explicitly NAK'd packets.
 	 */
-	if (ack) {
+	if (ack_skb) {
+		ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
+
 		for (i = 0; i < ack->nAcks; i++) {
 			rxrpc_seq_t seq;
 
@@ -197,7 +175,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 			rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
 
 			if (list_empty(&txb->tx_link)) {
-				rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
 				list_add_tail(&txb->tx_link, &retrans_queue);
 				set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
 			}
@@ -241,7 +218,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	do_resend:
 		unacked = true;
 		if (list_empty(&txb->tx_link)) {
-			rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
 			list_add_tail(&txb->tx_link, &retrans_queue);
 			set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
 			rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
@@ -249,10 +225,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	}
 
 no_further_resend:
-	spin_unlock(&call->tx_lock);
 no_resend:
-	rxrpc_free_skb(ack_skb, rxrpc_skb_put_ack);
-
 	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
 	resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
 						     !list_empty(&retrans_queue));
@@ -266,7 +239,7 @@ no_resend:
 	 * retransmitting data.
 	 */
 	if (list_empty(&retrans_queue)) {
-		rxrpc_reduce_call_timer(call, resend_at, now_j,
+		rxrpc_reduce_call_timer(call, resend_at, jiffies,
 					rxrpc_timer_set_for_resend);
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
@@ -276,15 +249,11 @@ no_resend:
 		goto out;
 	}
 
+	/* Retransmit the queue */
 	while ((txb = list_first_entry_or_null(&retrans_queue,
 					       struct rxrpc_txbuf, tx_link))) {
 		list_del_init(&txb->tx_link);
-		rxrpc_send_data_packet(call, txb);
-		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_trans);
-
-		trace_rxrpc_retransmit(call, txb->seq,
-				       ktime_to_ns(ktime_sub(txb->last_sent,
-							     max_age)));
+		rxrpc_transmit_one(call, txb);
 	}
 
 out:
@@ -358,15 +327,26 @@ static void rxrpc_transmit_some_data(struct rxrpc_call *call)
 }
 
 /*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_initial_ping(struct rxrpc_call *call)
+{
+	if (call->peer->rtt_count < 3 ||
+	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+			 ktime_get_real()))
+		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+			       rxrpc_propose_ack_ping_for_params);
+}
+
+/*
  * Handle retransmission and deferred ACK/abort generation.
  */
-void rxrpc_process_call(struct work_struct *work)
+void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
 {
-	struct rxrpc_call *call =
-		container_of(work, struct rxrpc_call, processor);
 	unsigned long now, next, t;
-	unsigned int iterations = 0;
 	rxrpc_serial_t ackr_serial;
+	bool resend = false, expired = false;
 
 	rxrpc_see_call(call, rxrpc_call_see_input);
 
@@ -374,47 +354,31 @@
 	_enter("{%d,%s,%lx}",
 	       call->debug_id, rxrpc_call_states[call->state], call->events);
 
-recheck_state:
-	if (call->acks_hard_ack != call->tx_bottom)
-		rxrpc_shrink_call_tx_buffer(call);
-
-	/* Limit the number of times we do this before returning to the manager */
-	if (!rxrpc_tx_window_has_space(call) ||
-	    list_empty(&call->tx_sendmsg)) {
-		iterations++;
-		if (iterations > 5)
-			goto requeue;
-	}
-
-	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
-		rxrpc_send_abort_packet(call);
-		goto recheck_state;
-	}
+	if (call->state == RXRPC_CALL_COMPLETE)
+		goto out;
 
-	if (call->state == RXRPC_CALL_COMPLETE) {
-		del_timer_sync(&call->timer);
+	if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
 		goto out;
-	}
 
-	/* Work out if any timeouts tripped */
+	/* If we see our async-event poke, check for timeout trippage. */
 	now = jiffies;
 	t = READ_ONCE(call->expect_rx_by);
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
-		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+		expired = true;
 	}
 
 	t = READ_ONCE(call->expect_req_by);
 	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
 	    time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
-		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+		expired = true;
 	}
 
 	t = READ_ONCE(call->expect_term_by);
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
-		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+		expired = true;
 	}
 
 	t = READ_ONCE(call->delay_ack_at);
@@ -453,13 +417,19 @@ recheck_state:
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
 		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
-		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+		resend = true;
 	}
 
+	if (skb)
+		rxrpc_input_call_packet(call, skb);
+
 	rxrpc_transmit_some_data(call);
 
+	if (test_and_clear_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events))
+		rxrpc_send_initial_ping(call);
+
 	/* Process events */
-	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
+	if (expired) {
 		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
 		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
 			trace_rxrpc_call_reset(call);
@@ -467,51 +437,50 @@ recheck_state:
 		} else {
 			rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
 		}
-		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
-		goto recheck_state;
+		rxrpc_send_abort_packet(call);
+		goto out;
 	}
 
-	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
-		call->acks_lost_top = call->tx_top;
+	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
 		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
 			       rxrpc_propose_ack_ping_for_lost_ack);
-	}
 
-	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
-	    call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
-		rxrpc_resend(call, now);
-		goto recheck_state;
-	}
+	if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY)
+		rxrpc_resend(call, NULL);
+
+	if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
+		rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
+			       rxrpc_propose_ack_rx_idle);
+
+	if (atomic_read(&call->ackr_nr_unacked) > 2)
+		rxrpc_send_ACK(call, RXRPC_ACK_IDLE, 0,
+			       rxrpc_propose_ack_input_data);
 
 	/* Make sure the timer is restarted */
-	next = call->expect_rx_by;
+	if (call->state != RXRPC_CALL_COMPLETE) {
+		next = call->expect_rx_by;
 
 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
 
-	set(call->expect_req_by);
-	set(call->expect_term_by);
-	set(call->delay_ack_at);
-	set(call->ack_lost_at);
-	set(call->resend_at);
-	set(call->keepalive_at);
-	set(call->ping_at);
-
-	now = jiffies;
-	if (time_after_eq(now, next))
-		goto recheck_state;
+		set(call->expect_req_by);
+		set(call->expect_term_by);
+		set(call->delay_ack_at);
+		set(call->ack_lost_at);
+		set(call->resend_at);
+		set(call->keepalive_at);
+		set(call->ping_at);
 
-	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+		now = jiffies;
+		if (time_after_eq(now, next))
+			rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
 
-	/* other events may have been raised since we started checking */
-	if (call->events)
-		goto requeue;
+		rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+	}
 
 out:
+	if (call->state == RXRPC_CALL_COMPLETE)
+		del_timer_sync(&call->timer);
+	if (call->acks_hard_ack != call->tx_bottom)
+		rxrpc_shrink_call_tx_buffer(call);
 	_leave("");
-	return;
-
-requeue:
-	if (call->state < RXRPC_CALL_COMPLETE)
-		rxrpc_queue_call(call, rxrpc_call_queue_requeue);
-	goto out;
 }
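The shape of the change above: rxrpc_process_call(), a self-requeueing work item that communicated through RXRPC_CALL_EV_* event bits and recheck_state/requeue loops, becomes rxrpc_input_call_event(), which is handed the sk_buff that woke it (or NULL for a timer poke) and records "expired" and "resend" in local variables for a single pass. A minimal standalone C sketch of that pattern, using hypothetical fake_call/fake_skb types rather than the kernel's structures, might look like this:

/* Standalone sketch, NOT kernel code: hypothetical types modelling the
 * single-pass event handling this patch applies to rxrpc_input_call_event().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

enum call_state { CALL_RUNNING, CALL_COMPLETE };

struct fake_skb  { int mark; };
struct fake_call {
	enum call_state state;
	time_t expect_rx_by;	/* receive deadline */
	time_t resend_at;	/* retransmission deadline */
};

/* One pass: the caller supplies the reason for waking (a packet, or NULL for
 * a timer poke); timeouts are noted in locals instead of persistent event bits.
 */
static void input_call_event(struct fake_call *call, struct fake_skb *skb)
{
	bool expired = false, resend = false;
	time_t now = time(NULL);

	if (call->state == CALL_COMPLETE)
		return;

	if (now >= call->expect_rx_by)
		expired = true;
	if (now >= call->resend_at)
		resend = true;

	if (skb)
		printf("process packet, mark=%d\n", skb->mark);

	if (expired) {
		printf("call timed out: abort\n");
		call->state = CALL_COMPLETE;
		return;
	}
	if (resend)
		printf("retransmit unacked data\n");
}

int main(void)
{
	struct fake_call call = {
		.state		= CALL_RUNNING,
		.expect_rx_by	= time(NULL) + 60,
		.resend_at	= time(NULL) - 1,	/* already due */
	};

	input_call_event(&call, NULL);	/* timer poke, no packet */
	return 0;
}

The point of the pattern, as the diff applies it, is that the deferred-event bookkeeping largely disappears: whichever context wakes the handler supplies the reason (a data packet, an error skb, or a timer poke via rxrpc_poke_call()), and the handler acts on it in one shot instead of setting bits and requeueing itself.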