From 827efed6a66ef8a1c071400b5952fee4a5ffedf9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Mar 2018 23:02:47 +0100 Subject: rxrpc: Trace resend Add a tracepoint to trace packet resend events and to dump the Tx annotation buffer for added illumination. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'include/trace') diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 36cb50c111a6..41979f907575 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -420,6 +420,7 @@ rxrpc_rtt_rx_traces; rxrpc_timer_traces; rxrpc_propose_ack_traces; rxrpc_propose_ack_outcomes; +rxrpc_congest_modes; rxrpc_congest_changes; /* @@ -1229,6 +1230,29 @@ TRACE_EVENT(rxrpc_connect_call, __entry->call_id) ); +TRACE_EVENT(rxrpc_resend, + TP_PROTO(struct rxrpc_call *call, int ix), + + TP_ARGS(call, ix), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(int, ix ) + __array(u8, anno, 64 ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->ix = ix; + memcpy(__entry->anno, call->rxtx_annotations, 64); + ), + + TP_printk("c=%p ix=%u a=%64phN", + __entry->call, + __entry->ix, + __entry->anno) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ -- cgit v1.2.3 From a25e21f0bcd25673b91b97b9805db33350feec0f Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Mar 2018 23:03:00 +0100 Subject: rxrpc, afs: Use debug_ids rather than pointers in traces In rxrpc and afs, use the debug_ids that are monotonically allocated to various objects as they're allocated, rather than pointers, since kernel pointers are now hashed and therefore less useful. Further, the debug IDs aren't reused anywhere near as quickly. In addition, allow kernel services that use rxrpc, such as afs, to take numbers from the rxrpc counter, assign them to their own call structs and pass them in to rxrpc for both client and service calls so that the trace lines for each will have the same ID tag.
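As an illustration of the intended usage pattern, here is a condensed sketch based on the afs changes in the diff below (the fragment is not itself part of the patch): the kernel service allocates one ID per call from the counter rxrpc now exports and passes that same ID in when the rxrpc call is begun:

	/* One ID per afs_call, taken from the shared rxrpc counter. */
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	/* Hand the same ID to rxrpc so that the afs and rxrpc trace
	 * lines for this call carry the same c= tag.
	 */
	rxcall = rxrpc_kernel_begin_call(call->net->socket, &srx, key,
					 (unsigned long)call,
					 tx_total_len, gfp,
					 afs_wake_up_call_waiter,
					 call->upgrade,
					 call->debug_id);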
Signed-off-by: David Howells --- fs/afs/internal.h | 1 + fs/afs/rxrpc.c | 12 ++-- include/net/af_rxrpc.h | 11 ++- include/trace/events/afs.h | 69 +++++++++---------- include/trace/events/rxrpc.h | 155 ++++++++++++++++++++++--------------------- net/rxrpc/af_rxrpc.c | 7 +- net/rxrpc/ar-internal.h | 8 +-- net/rxrpc/call_accept.c | 18 +++-- net/rxrpc/call_object.c | 15 +++-- net/rxrpc/conn_event.c | 3 +- net/rxrpc/input.c | 6 +- net/rxrpc/sendmsg.c | 3 +- 12 files changed, 163 insertions(+), 145 deletions(-) (limited to 'include/trace') diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f38d6a561a84..72217170b155 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -118,6 +118,7 @@ struct afs_call { bool ret_reply0; /* T if should return reply[0] on success */ bool upgrade; /* T to request service upgrade */ u16 service_id; /* Actual service ID (after upgrade) */ + unsigned int debug_id; /* Trace ID */ u32 operation_ID; /* operation ID for an incoming call */ u32 count; /* count for use in unmarshalling */ __be32 tmp; /* place to extract temporary data */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index e1126659f043..b819900916e6 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -131,6 +131,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net, call->type = type; call->net = net; + call->debug_id = atomic_inc_return(&rxrpc_debug_id); atomic_set(&call->usage, 1); INIT_WORK(&call->async_work, afs_process_async_call); init_waitqueue_head(&call->waitq); @@ -169,11 +170,12 @@ void afs_put_call(struct afs_call *call) afs_put_server(call->net, call->cm_server); afs_put_cb_interest(call->net, call->cbi); kfree(call->request); - kfree(call); - o = atomic_dec_return(&net->nr_outstanding_calls); trace_afs_call(call, afs_call_trace_free, 0, o, __builtin_return_address(0)); + kfree(call); + + o = atomic_dec_return(&net->nr_outstanding_calls); if (o == 0) wake_up_atomic_t(&net->nr_outstanding_calls); } @@ -378,7 +380,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, (async ? afs_wake_up_async_call : afs_wake_up_call_waiter), - call->upgrade); + call->upgrade, + call->debug_id); if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); goto error_kill_call; @@ -727,7 +730,8 @@ void afs_charge_preallocation(struct work_struct *work) afs_wake_up_async_call, afs_rx_attach, (unsigned long)call, - GFP_KERNEL) < 0) + GFP_KERNEL, + call->debug_id) < 0) break; call = NULL; } diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 2b3a6eec4570..8ae8ee004258 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -31,6 +31,11 @@ enum rxrpc_call_completion { NR__RXRPC_CALL_COMPLETIONS }; +/* + * Debug ID counter for tracing. 
+ */ +extern atomic_t rxrpc_debug_id; + typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *, unsigned long); typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *, @@ -50,7 +55,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, s64, gfp_t, rxrpc_notify_rx_t, - bool); + bool, + unsigned int); int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, rxrpc_notify_end_tx_t); @@ -63,7 +69,8 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, - rxrpc_user_attach_call_t, unsigned long, gfp_t); + rxrpc_user_attach_call_t, unsigned long, gfp_t, + unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *, struct key *); diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 6b59c63a8e51..63815f66b274 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -133,8 +133,7 @@ TRACE_EVENT(afs_recv_data, TP_ARGS(call, count, offset, want_more, ret), TP_STRUCT__entry( - __field(struct rxrpc_call *, rxcall ) - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(enum afs_call_state, state ) __field(unsigned int, count ) __field(unsigned int, offset ) @@ -144,8 +143,7 @@ TRACE_EVENT(afs_recv_data, ), TP_fast_assign( - __entry->rxcall = call->rxcall; - __entry->call = call; + __entry->call = call->debug_id; __entry->state = call->state; __entry->unmarshall = call->unmarshall; __entry->count = count; @@ -154,8 +152,7 @@ TRACE_EVENT(afs_recv_data, __entry->ret = ret; ), - TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d", - __entry->rxcall, + TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d", __entry->call, __entry->state, __entry->unmarshall, __entry->offset, __entry->count, @@ -168,21 +165,18 @@ TRACE_EVENT(afs_notify_call, TP_ARGS(rxcall, call), TP_STRUCT__entry( - __field(struct rxrpc_call *, rxcall ) - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(enum afs_call_state, state ) __field(unsigned short, unmarshall ) ), TP_fast_assign( - __entry->rxcall = rxcall; - __entry->call = call; + __entry->call = call->debug_id; __entry->state = call->state; __entry->unmarshall = call->unmarshall; ), - TP_printk("c=%p ac=%p s=%u u=%u", - __entry->rxcall, + TP_printk("c=%08x s=%u u=%u", __entry->call, __entry->state, __entry->unmarshall) ); @@ -193,21 +187,18 @@ TRACE_EVENT(afs_cb_call, TP_ARGS(call), TP_STRUCT__entry( - __field(struct rxrpc_call *, rxcall ) - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(const char *, name ) __field(u32, op ) ), TP_fast_assign( - __entry->rxcall = call->rxcall; - __entry->call = call; + __entry->call = call->debug_id; __entry->name = call->type->name; __entry->op = call->operation_ID; ), - TP_printk("c=%p ac=%p %s o=%u", - __entry->rxcall, + TP_printk("c=%08x %s o=%u", __entry->call, __entry->name, __entry->op) @@ -220,7 +211,7 @@ TRACE_EVENT(afs_call, TP_ARGS(call, op, usage, outstanding, where), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(int, op ) __field(int, usage ) __field(int, outstanding ) @@ -228,14 +219,14 @@ TRACE_EVENT(afs_call, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->op = op; __entry->usage = 
usage; __entry->outstanding = outstanding; __entry->where = where; ), - TP_printk("c=%p %s u=%d o=%d sp=%pSR", + TP_printk("c=%08x %s u=%d o=%d sp=%pSR", __entry->call, __print_symbolic(__entry->op, afs_call_traces), __entry->usage, @@ -249,13 +240,13 @@ TRACE_EVENT(afs_make_fs_call, TP_ARGS(call, fid), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(enum afs_fs_operation, op ) __field_struct(struct afs_fid, fid ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->op = call->operation_ID; if (fid) { __entry->fid = *fid; @@ -266,7 +257,7 @@ TRACE_EVENT(afs_make_fs_call, } ), - TP_printk("c=%p %06x:%06x:%06x %s", + TP_printk("c=%08x %06x:%06x:%06x %s", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -280,16 +271,16 @@ TRACE_EVENT(afs_make_vl_call, TP_ARGS(call), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(enum afs_vl_operation, op ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->op = call->operation_ID; ), - TP_printk("c=%p %s", + TP_printk("c=%08x %s", __entry->call, __print_symbolic(__entry->op, afs_vl_operations)) ); @@ -300,20 +291,20 @@ TRACE_EVENT(afs_call_done, TP_ARGS(call), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(struct rxrpc_call *, rx_call ) __field(int, ret ) __field(u32, abort_code ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->rx_call = call->rxcall; __entry->ret = call->error; __entry->abort_code = call->abort_code; ), - TP_printk(" c=%p ret=%d ab=%d [%p]", + TP_printk(" c=%08x ret=%d ab=%d [%p]", __entry->call, __entry->ret, __entry->abort_code, @@ -327,7 +318,7 @@ TRACE_EVENT(afs_send_pages, TP_ARGS(call, msg, first, last, offset), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(pgoff_t, first ) __field(pgoff_t, last ) __field(unsigned int, nr ) @@ -337,7 +328,7 @@ TRACE_EVENT(afs_send_pages, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->first = first; __entry->last = last; __entry->nr = msg->msg_iter.nr_segs; @@ -346,7 +337,7 @@ TRACE_EVENT(afs_send_pages, __entry->flags = msg->msg_flags; ), - TP_printk(" c=%p %lx-%lx-%lx b=%x o=%x f=%x", + TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x", __entry->call, __entry->first, __entry->first + __entry->nr - 1, __entry->last, __entry->bytes, __entry->offset, @@ -360,7 +351,7 @@ TRACE_EVENT(afs_sent_pages, TP_ARGS(call, first, last, cursor, ret), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(pgoff_t, first ) __field(pgoff_t, last ) __field(pgoff_t, cursor ) @@ -368,14 +359,14 @@ TRACE_EVENT(afs_sent_pages, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->first = first; __entry->last = last; __entry->cursor = cursor; __entry->ret = ret; ), - TP_printk(" c=%p %lx-%lx c=%lx r=%d", + TP_printk(" c=%08x %lx-%lx c=%lx r=%d", __entry->call, __entry->first, __entry->last, __entry->cursor, __entry->ret) @@ -450,7 +441,7 @@ TRACE_EVENT(afs_call_state, TP_ARGS(call, from, to, ret, remote_abort), TP_STRUCT__entry( - __field(struct afs_call *, call ) + __field(unsigned int, call ) __field(enum afs_call_state, from ) __field(enum afs_call_state, to ) __field(int, ret ) @@ -458,14 +449,14 @@ TRACE_EVENT(afs_call_state, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->from = from; 
__entry->to = to; __entry->ret = ret; __entry->abort = remote_abort; ), - TP_printk("c=%p %u->%u r=%d ab=%d", + TP_printk("c=%08x %u->%u r=%d ab=%d", __entry->call, __entry->from, __entry->to, __entry->ret, __entry->abort) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 41979f907575..4d2c2d35c5cb 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -439,20 +439,20 @@ TRACE_EVENT(rxrpc_conn, TP_ARGS(conn, op, usage, where), TP_STRUCT__entry( - __field(struct rxrpc_connection *, conn ) - __field(int, op ) - __field(int, usage ) - __field(const void *, where ) + __field(unsigned int, conn ) + __field(int, op ) + __field(int, usage ) + __field(const void *, where ) ), TP_fast_assign( - __entry->conn = conn; + __entry->conn = conn->debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; ), - TP_printk("C=%p %s u=%d sp=%pSR", + TP_printk("C=%08x %s u=%d sp=%pSR", __entry->conn, __print_symbolic(__entry->op, rxrpc_conn_traces), __entry->usage, @@ -466,7 +466,7 @@ TRACE_EVENT(rxrpc_client, TP_ARGS(conn, channel, op), TP_STRUCT__entry( - __field(struct rxrpc_connection *, conn ) + __field(unsigned int, conn ) __field(u32, cid ) __field(int, channel ) __field(int, usage ) @@ -475,7 +475,7 @@ TRACE_EVENT(rxrpc_client, ), TP_fast_assign( - __entry->conn = conn; + __entry->conn = conn->debug_id; __entry->channel = channel; __entry->usage = atomic_read(&conn->usage); __entry->op = op; @@ -483,7 +483,7 @@ TRACE_EVENT(rxrpc_client, __entry->cs = conn->cache_state; ), - TP_printk("C=%p h=%2d %s %s i=%08x u=%d", + TP_printk("C=%08x h=%2d %s %s i=%08x u=%d", __entry->conn, __entry->channel, __print_symbolic(__entry->op, rxrpc_client_traces), @@ -499,7 +499,7 @@ TRACE_EVENT(rxrpc_call, TP_ARGS(call, op, usage, where, aux), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(int, op ) __field(int, usage ) __field(const void *, where ) @@ -507,14 +507,14 @@ TRACE_EVENT(rxrpc_call, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; __entry->aux = aux; ), - TP_printk("c=%p %s u=%d sp=%pSR a=%p", + TP_printk("c=%08x %s u=%d sp=%pSR a=%p", __entry->call, __print_symbolic(__entry->op, rxrpc_call_traces), __entry->usage, @@ -593,12 +593,13 @@ TRACE_EVENT(rxrpc_rx_done, ); TRACE_EVENT(rxrpc_abort, - TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq, - int abort_code, int error), + TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id, + rxrpc_seq_t seq, int abort_code, int error), - TP_ARGS(why, cid, call_id, seq, abort_code, error), + TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error), TP_STRUCT__entry( + __field(unsigned int, call_nr ) __array(char, why, 4 ) __field(u32, cid ) __field(u32, call_id ) @@ -609,6 +610,7 @@ TRACE_EVENT(rxrpc_abort, TP_fast_assign( memcpy(__entry->why, why, 4); + __entry->call_nr = call_nr; __entry->cid = cid; __entry->call_id = call_id; __entry->abort_code = abort_code; @@ -616,7 +618,8 @@ TRACE_EVENT(rxrpc_abort, __entry->seq = seq; ), - TP_printk("%08x:%08x s=%u a=%d e=%d %s", + TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s", + __entry->call_nr, __entry->cid, __entry->call_id, __entry->seq, __entry->abort_code, __entry->error, __entry->why) ); @@ -627,7 +630,7 @@ TRACE_EVENT(rxrpc_transmit, TP_ARGS(call, why), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_transmit_trace, why ) 
__field(rxrpc_seq_t, tx_hard_ack ) __field(rxrpc_seq_t, tx_top ) @@ -635,14 +638,14 @@ TRACE_EVENT(rxrpc_transmit, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->tx_hard_ack = call->tx_hard_ack; __entry->tx_top = call->tx_top; __entry->tx_winsize = call->tx_winsize; ), - TP_printk("c=%p %s f=%08x n=%u/%u", + TP_printk("c=%08x %s f=%08x n=%u/%u", __entry->call, __print_symbolic(__entry->why, rxrpc_transmit_traces), __entry->tx_hard_ack + 1, @@ -657,7 +660,7 @@ TRACE_EVENT(rxrpc_rx_data, TP_ARGS(call, seq, serial, flags, anno), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_seq_t, seq ) __field(rxrpc_serial_t, serial ) __field(u8, flags ) @@ -665,14 +668,14 @@ TRACE_EVENT(rxrpc_rx_data, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; __entry->anno = anno; ), - TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x", + TP_printk("c=%08x DATA %08x q=%08x fl=%02x a=%02x", __entry->call, __entry->serial, __entry->seq, @@ -688,7 +691,7 @@ TRACE_EVENT(rxrpc_rx_ack, TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_serial_t, serial ) __field(rxrpc_serial_t, ack_serial ) __field(rxrpc_seq_t, first ) @@ -698,7 +701,7 @@ TRACE_EVENT(rxrpc_rx_ack, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->serial = serial; __entry->ack_serial = ack_serial; __entry->first = first; @@ -707,7 +710,7 @@ TRACE_EVENT(rxrpc_rx_ack, __entry->n_acks = n_acks; ), - TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u", + TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), @@ -724,18 +727,18 @@ TRACE_EVENT(rxrpc_rx_abort, TP_ARGS(call, serial, abort_code), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_serial_t, serial ) __field(u32, abort_code ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->serial = serial; __entry->abort_code = abort_code; ), - TP_printk("c=%p ABORT %08x ac=%d", + TP_printk("c=%08x ABORT %08x ac=%d", __entry->call, __entry->serial, __entry->abort_code) @@ -748,20 +751,20 @@ TRACE_EVENT(rxrpc_rx_rwind_change, TP_ARGS(call, serial, rwind, wake), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_serial_t, serial ) __field(u32, rwind ) __field(bool, wake ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->serial = serial; __entry->rwind = rwind; __entry->wake = wake; ), - TP_printk("c=%p %08x rw=%u%s", + TP_printk("c=%08x %08x rw=%u%s", __entry->call, __entry->serial, __entry->rwind, @@ -775,7 +778,7 @@ TRACE_EVENT(rxrpc_tx_data, TP_ARGS(call, seq, serial, flags, retrans, lose), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_seq_t, seq ) __field(rxrpc_serial_t, serial ) __field(u8, flags ) @@ -784,7 +787,7 @@ TRACE_EVENT(rxrpc_tx_data, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; @@ -792,7 +795,7 @@ TRACE_EVENT(rxrpc_tx_data, __entry->lose = lose; ), - TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s", + TP_printk("c=%08x DATA %08x q=%08x 
fl=%02x%s%s", __entry->call, __entry->serial, __entry->seq, @@ -809,7 +812,7 @@ TRACE_EVENT(rxrpc_tx_ack, TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_serial_t, serial ) __field(rxrpc_seq_t, ack_first ) __field(rxrpc_serial_t, ack_serial ) @@ -818,7 +821,7 @@ TRACE_EVENT(rxrpc_tx_ack, ), TP_fast_assign( - __entry->call = call; + __entry->call = call ? call->debug_id : 0; __entry->serial = serial; __entry->ack_first = ack_first; __entry->ack_serial = ack_serial; @@ -826,7 +829,7 @@ TRACE_EVENT(rxrpc_tx_ack, __entry->n_acks = n_acks; ), - TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u", + TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), @@ -842,7 +845,7 @@ TRACE_EVENT(rxrpc_receive, TP_ARGS(call, why, serial, seq), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_receive_trace, why ) __field(rxrpc_serial_t, serial ) __field(rxrpc_seq_t, seq ) @@ -851,7 +854,7 @@ TRACE_EVENT(rxrpc_receive, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->seq = seq; @@ -859,7 +862,7 @@ TRACE_EVENT(rxrpc_receive, __entry->top = call->rx_top; ), - TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x", + TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x", __entry->call, __print_symbolic(__entry->why, rxrpc_receive_traces), __entry->serial, @@ -876,7 +879,7 @@ TRACE_EVENT(rxrpc_recvmsg, TP_ARGS(call, why, seq, offset, len, ret), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_recvmsg_trace, why ) __field(rxrpc_seq_t, seq ) __field(unsigned int, offset ) @@ -885,7 +888,7 @@ TRACE_EVENT(rxrpc_recvmsg, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->seq = seq; __entry->offset = offset; @@ -893,7 +896,7 @@ TRACE_EVENT(rxrpc_recvmsg, __entry->ret = ret; ), - TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d", + TP_printk("c=%08x %s q=%08x o=%u l=%u ret=%d", __entry->call, __print_symbolic(__entry->why, rxrpc_recvmsg_traces), __entry->seq, @@ -909,18 +912,18 @@ TRACE_EVENT(rxrpc_rtt_tx, TP_ARGS(call, why, send_serial), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_rtt_tx_trace, why ) __field(rxrpc_serial_t, send_serial ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->send_serial = send_serial; ), - TP_printk("c=%p %s sr=%08x", + TP_printk("c=%08x %s sr=%08x", __entry->call, __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) @@ -934,7 +937,7 @@ TRACE_EVENT(rxrpc_rtt_rx, TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_rtt_rx_trace, why ) __field(u8, nr ) __field(rxrpc_serial_t, send_serial ) @@ -944,7 +947,7 @@ TRACE_EVENT(rxrpc_rtt_rx, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; @@ -953,7 +956,7 @@ TRACE_EVENT(rxrpc_rtt_rx, __entry->avg = avg; ), - TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", + TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", __entry->call, 
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, @@ -970,7 +973,7 @@ TRACE_EVENT(rxrpc_timer, TP_ARGS(call, why, now), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_timer_trace, why ) __field(long, now ) __field(long, ack_at ) @@ -984,7 +987,7 @@ TRACE_EVENT(rxrpc_timer, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->now = now; __entry->ack_at = call->ack_at; @@ -996,7 +999,7 @@ TRACE_EVENT(rxrpc_timer, __entry->timer = call->timer.expires; ), - TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", + TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", __entry->call, __print_symbolic(__entry->why, rxrpc_timer_traces), __entry->ack_at - __entry->now, @@ -1039,7 +1042,7 @@ TRACE_EVENT(rxrpc_propose_ack, outcome), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_propose_ack_trace, why ) __field(rxrpc_serial_t, serial ) __field(u8, ack_reason ) @@ -1049,7 +1052,7 @@ TRACE_EVENT(rxrpc_propose_ack, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->ack_reason = ack_reason; @@ -1058,7 +1061,7 @@ TRACE_EVENT(rxrpc_propose_ack, __entry->outcome = outcome; ), - TP_printk("c=%p %s %s r=%08x i=%u b=%u%s", + TP_printk("c=%08x %s %s r=%08x i=%u b=%u%s", __entry->call, __print_symbolic(__entry->why, rxrpc_propose_ack_traces), __print_symbolic(__entry->ack_reason, rxrpc_ack_names), @@ -1075,20 +1078,20 @@ TRACE_EVENT(rxrpc_retransmit, TP_ARGS(call, seq, annotation, expiry), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_seq_t, seq ) __field(u8, annotation ) __field(s64, expiry ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->seq = seq; __entry->annotation = annotation; __entry->expiry = expiry; ), - TP_printk("c=%p q=%x a=%02x xp=%lld", + TP_printk("c=%08x q=%x a=%02x xp=%lld", __entry->call, __entry->seq, __entry->annotation, @@ -1102,7 +1105,7 @@ TRACE_EVENT(rxrpc_congest, TP_ARGS(call, summary, ack_serial, change), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(enum rxrpc_congest_change, change ) __field(rxrpc_seq_t, hard_ack ) __field(rxrpc_seq_t, top ) @@ -1112,7 +1115,7 @@ TRACE_EVENT(rxrpc_congest, ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->change = change; __entry->hard_ack = call->tx_hard_ack; __entry->top = call->tx_top; @@ -1121,7 +1124,7 @@ TRACE_EVENT(rxrpc_congest, memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), - TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", + TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), @@ -1146,16 +1149,16 @@ TRACE_EVENT(rxrpc_disconnect_call, TP_ARGS(call), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(u32, abort_code ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->abort_code = call->abort_code; ), - TP_printk("c=%p ab=%08x", + TP_printk("c=%08x ab=%08x", __entry->call, __entry->abort_code) ); @@ -1166,16 +1169,16 @@ TRACE_EVENT(rxrpc_improper_term, TP_ARGS(call), TP_STRUCT__entry( - 
__field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(u32, abort_code ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->abort_code = call->abort_code; ), - TP_printk("c=%p ab=%08x", + TP_printk("c=%08x ab=%08x", __entry->call, __entry->abort_code) ); @@ -1187,18 +1190,18 @@ TRACE_EVENT(rxrpc_rx_eproto, TP_ARGS(call, serial, why), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(rxrpc_serial_t, serial ) __field(const char *, why ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->serial = serial; __entry->why = why; ), - TP_printk("c=%p EPROTO %08x %s", + TP_printk("c=%08x EPROTO %08x %s", __entry->call, __entry->serial, __entry->why) @@ -1210,20 +1213,20 @@ TRACE_EVENT(rxrpc_connect_call, TP_ARGS(call), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(unsigned long, user_call_ID ) __field(u32, cid ) __field(u32, call_id ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->user_call_ID = call->user_call_ID; __entry->cid = call->cid; __entry->call_id = call->call_id; ), - TP_printk("c=%p u=%p %08x:%08x", + TP_printk("c=%08x u=%p %08x:%08x", __entry->call, (void *)__entry->user_call_ID, __entry->cid, @@ -1236,18 +1239,18 @@ TRACE_EVENT(rxrpc_resend, TP_ARGS(call, ix), TP_STRUCT__entry( - __field(struct rxrpc_call *, call ) + __field(unsigned int, call ) __field(int, ix ) __array(u8, anno, 64 ) ), TP_fast_assign( - __entry->call = call; + __entry->call = call->debug_id; __entry->ix = ix; memcpy(__entry->anno, call->rxtx_annotations, 64); ), - TP_printk("c=%p ix=%u a=%64phN", + TP_printk("c=%08x ix=%u a=%64phN", __entry->call, __entry->ix, __entry->anno) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9e1c2c6b6a67..ec5ec68be1aa 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -40,6 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops; /* current debugging ID */ atomic_t rxrpc_debug_id; +EXPORT_SYMBOL(rxrpc_debug_id); /* count of skbs currently in use */ atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs; @@ -267,6 +268,7 @@ static int rxrpc_listen(struct socket *sock, int backlog) * @gfp: The allocation constraints * @notify_rx: Where to send notifications instead of socket queue * @upgrade: Request service upgrade for call + * @debug_id: The debug ID for tracing to be assigned to the call * * Allow a kernel service to begin a call on the nominated socket. This just * sets up all the internal tracking structures and allocates connection and @@ -282,7 +284,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, s64 tx_total_len, gfp_t gfp, rxrpc_notify_rx_t notify_rx, - bool upgrade) + bool upgrade, + unsigned int debug_id) { struct rxrpc_conn_parameters cp; struct rxrpc_call_params p; @@ -314,7 +317,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, cp.exclusive = false; cp.upgrade = upgrade; cp.service_id = srx->srx_service; - call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp); + call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id); /* The socket has been unlocked. 
*/ if (!IS_ERR(call)) { call->notify_rx = notify_rx; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 416688381eb7..9c9817ddafc5 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -691,7 +691,6 @@ struct rxrpc_send_params { * af_rxrpc.c */ extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs; -extern atomic_t rxrpc_debug_id; extern struct workqueue_struct *rxrpc_workqueue; /* @@ -732,11 +731,12 @@ extern unsigned int rxrpc_max_call_lifetime; extern struct kmem_cache *rxrpc_call_jar; struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); -struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t); +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, - struct rxrpc_call_params *, gfp_t); + struct rxrpc_call_params *, gfp_t, + unsigned int); int rxrpc_retry_client_call(struct rxrpc_sock *, struct rxrpc_call *, struct rxrpc_conn_parameters *, @@ -822,7 +822,7 @@ static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call, rxrpc_seq_t seq, u32 abort_code, int error) { - trace_rxrpc_abort(why, call->cid, call->call_id, seq, + trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq, abort_code, error); return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error); diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 3028298ca561..92ebd1d7e0bb 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -34,7 +34,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, struct rxrpc_backlog *b, rxrpc_notify_rx_t notify_rx, rxrpc_user_attach_call_t user_attach_call, - unsigned long user_call_ID, gfp_t gfp) + unsigned long user_call_ID, gfp_t gfp, + unsigned int debug_id) { const void *here = __builtin_return_address(0); struct rxrpc_call *call; @@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, /* Now it gets complicated, because calls get registered with the * socket here, particularly if a user ID is preassigned by the user. 
*/ - call = rxrpc_alloc_call(rx, gfp); + call = rxrpc_alloc_call(rx, gfp, debug_id); if (!call) return -ENOMEM; call->flags |= (1 << RXRPC_CALL_IS_SERVICE); @@ -174,7 +175,8 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) if (rx->discard_new_call) return 0; - while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0) + while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp, + atomic_inc_return(&rxrpc_debug_id)) == 0) ; return 0; @@ -347,7 +349,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, service_id == rx->second_service)) goto found_service; - trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_INVALID_OPERATION, EOPNOTSUPP); skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; skb->priority = RX_INVALID_OPERATION; @@ -358,7 +360,7 @@ found_service: spin_lock(&rx->incoming_lock); if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || rx->sk.sk_state == RXRPC_CLOSE) { - trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber, + trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; skb->priority = RX_INVALID_OPERATION; @@ -635,6 +637,7 @@ out_discard: * @user_attach_call: Func to attach call to user_call_ID * @user_call_ID: The tag to attach to the preallocated call * @gfp: The allocation conditions. + * @debug_id: The tracing debug ID. * * Charge up the socket with preallocated calls, each with a user ID. A * function should be provided to effect the attachment from the user's side. @@ -645,7 +648,8 @@ out_discard: int rxrpc_kernel_charge_accept(struct socket *sock, rxrpc_notify_rx_t notify_rx, rxrpc_user_attach_call_t user_attach_call, - unsigned long user_call_ID, gfp_t gfp) + unsigned long user_call_ID, gfp_t gfp, + unsigned int debug_id) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct rxrpc_backlog *b = rx->backlog; @@ -655,6 +659,6 @@ int rxrpc_kernel_charge_accept(struct socket *sock, return rxrpc_service_prealloc_one(rx, b, notify_rx, user_attach_call, user_call_ID, - gfp); + gfp, debug_id); } EXPORT_SYMBOL(rxrpc_kernel_charge_accept); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 0b2db38dd32d..147657dfe757 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -99,7 +99,8 @@ found_extant_call: /* * allocate a new call */ -struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, + unsigned int debug_id) { struct rxrpc_call *call; @@ -138,7 +139,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) spin_lock_init(&call->notify_lock); rwlock_init(&call->state_lock); atomic_set(&call->usage, 1); - call->debug_id = atomic_inc_return(&rxrpc_debug_id); + call->debug_id = debug_id; call->tx_total_len = -1; call->next_rx_timo = 20 * HZ; call->next_req_timo = 1 * HZ; @@ -166,14 +167,15 @@ nomem: */ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, struct sockaddr_rxrpc *srx, - gfp_t gfp) + gfp_t gfp, + unsigned int debug_id) { struct rxrpc_call *call; ktime_t now; _enter(""); - call = rxrpc_alloc_call(rx, gfp); + call = rxrpc_alloc_call(rx, gfp, debug_id); if (!call) return ERR_PTR(-ENOMEM); call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; @@ -214,7 +216,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, struct rxrpc_conn_parameters *cp, struct sockaddr_rxrpc *srx, struct 
rxrpc_call_params *p, - gfp_t gfp) + gfp_t gfp, + unsigned int debug_id) __releases(&rx->sk.sk_lock.slock) { struct rxrpc_call *call, *xcall; @@ -225,7 +228,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, _enter("%p,%lx", rx, p->user_call_ID); - call = rxrpc_alloc_client_call(rx, srx, gfp); + call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id); if (IS_ERR(call)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(call)); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index b1dfae107431..d2ec3fd593e8 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -160,7 +160,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, lockdep_is_held(&conn->channel_lock)); if (call) { if (compl == RXRPC_CALL_LOCALLY_ABORTED) - trace_rxrpc_abort("CON", call->cid, + trace_rxrpc_abort(call->debug_id, + "CON", call->cid, call->call_id, 0, abort_code, error); if (rxrpc_set_call_completion(call, compl, diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 6fc61400337f..2a868fdab0ae 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1307,21 +1307,21 @@ out_unlock: wrong_security: rcu_read_unlock(); - trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RXKADINCONSISTENCY, EBADMSG); skb->priority = RXKADINCONSISTENCY; goto post_abort; reupgrade: rcu_read_unlock(); - trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_PROTOCOL_ERROR, EBADMSG); goto protocol_error; bad_message_unlock: rcu_read_unlock(); bad_message: - trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, + trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, RX_PROTOCOL_ERROR, EBADMSG); protocol_error: skb->priority = RX_PROTOCOL_ERROR; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 09f2a3e05221..8503f279b467 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -579,7 +579,8 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, cp.exclusive = rx->exclusive | p->exclusive; cp.upgrade = p->upgrade; cp.service_id = srx->srx_service; - call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL); + call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL, + atomic_inc_return(&rxrpc_debug_id)); /* The socket is now unlocked */ _leave(" = %p\n", call); -- cgit v1.2.3 From 1bae5d229532b4e8dfd5728cb3b8373bc9eec9eb Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Mar 2018 23:08:20 +0100 Subject: rxrpc: Trace call completion Add a tracepoint to track rxrpc calls moving into the completed state and to log the completion type and the recorded error value and abort code. 
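Condensed, the hook point added by this patch sits at the single place where a call transitions to RXRPC_CALL_COMPLETE (the surrounding function body here is reconstructed from context for readability; only the trace line is new):

	static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
						       enum rxrpc_call_completion compl,
						       u32 abort_code, int error)
	{
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->abort_code = abort_code;
			call->error = error;
			call->completion = compl;
			call->state = RXRPC_CALL_COMPLETE;
			trace_rxrpc_call_complete(call);	/* new */
			wake_up(&call->waitq);
			return true;
		}
		return false;
	}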
Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 33 +++++++++++++++++++++++++++++++++ net/rxrpc/ar-internal.h | 1 + 2 files changed, 34 insertions(+) (limited to 'include/trace') diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 4d2c2d35c5cb..2ea788f6f95d 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -400,6 +400,13 @@ enum rxrpc_congest_change { EM(RXRPC_ACK_IDLE, "IDL") \ E_(RXRPC_ACK__INVALID, "-?-") +#define rxrpc_completions \ + EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \ + EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \ + EM(RXRPC_CALL_LOCALLY_ABORTED, "LocalAbort") \ + EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ + E_(RXRPC_CALL_NETWORK_ERROR, "NetError") + /* * Export enum symbols via userspace. */ @@ -624,6 +631,32 @@ TRACE_EVENT(rxrpc_abort, __entry->abort_code, __entry->error, __entry->why) ); +TRACE_EVENT(rxrpc_call_complete, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(unsigned int, call ) + __field(enum rxrpc_call_completion, compl ) + __field(int, error ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call->debug_id; + __entry->compl = call->completion; + __entry->error = call->error; + __entry->abort_code = call->abort_code; + ), + + TP_printk("c=%08x %s r=%d ac=%d", + __entry->call, + __print_symbolic(__entry->compl, rxrpc_completions), + __entry->error, + __entry->abort_code) + ); + TRACE_EVENT(rxrpc_transmit, TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why), diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 9c9817ddafc5..21cf164b6d85 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -778,6 +778,7 @@ static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call, call->error = error; call->completion = compl, call->state = RXRPC_CALL_COMPLETE; + trace_rxrpc_call_complete(call); wake_up(&call->waitq); return true; } -- cgit v1.2.3 From c105547501016897194358b11451608a8d5f9a02 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 28 Mar 2018 12:05:32 -0700 Subject: treewide: remove large struct-pass-by-value from tracepoint arguments - fix trace_hfi1_ctxt_info() to pass the large struct by reference instead of by value - convert 'type array[]' tracepoint arguments into 'type *array', since the compiler will warn that sizeof('type array[]') == sizeof('type *array') and that the latter should be used instead. The CAST_TO_U64 macro in a later patch will enforce that tracepoint arguments can only be integers, pointers, or structures smaller than 8 bytes. Larger structures should be passed by reference.
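The shape of the conversion, shown on a made-up event (the real conversions are in the diff below): the struct is dereferenced inside TP_fast_assign(), which runs only when the event is enabled, rather than being copied by value at every call site even when tracing is off:

	TRACE_EVENT(example_ctxt_info,	/* hypothetical event */
		/* was: TP_PROTO(struct hfi1_ctxt_info cinfo) */
		TP_PROTO(struct hfi1_ctxt_info *cinfo),
		TP_ARGS(cinfo),
		TP_STRUCT__entry(
			__field(u16, egrtids)
		),
		TP_fast_assign(
			/* was: __entry->egrtids = cinfo.egrtids; */
			__entry->egrtids = cinfo->egrtids;
		),
		TP_printk("egrtids %u", __entry->egrtids)
	);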
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- drivers/infiniband/hw/hfi1/file_ops.c | 2 +- drivers/infiniband/hw/hfi1/trace_ctxts.h | 12 ++++++------ include/trace/events/f2fs.h | 2 +- net/wireless/trace.h | 2 +- sound/firewire/amdtp-stream-trace.h | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/trace') diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 41fafebe3b0d..da4aa1a95b11 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -1153,7 +1153,7 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len) cinfo.sdma_ring_size = fd->cq->nentries; cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; - trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo); + trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); if (copy_to_user((void __user *)arg, &cinfo, len)) return -EFAULT; diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h index 4eb4cc798035..e00c8a7d559c 100644 --- a/drivers/infiniband/hw/hfi1/trace_ctxts.h +++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h @@ -106,7 +106,7 @@ TRACE_EVENT(hfi1_uctxtdata, TRACE_EVENT(hfi1_ctxt_info, TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt, unsigned int subctxt, - struct hfi1_ctxt_info cinfo), + struct hfi1_ctxt_info *cinfo), TP_ARGS(dd, ctxt, subctxt, cinfo), TP_STRUCT__entry(DD_DEV_ENTRY(dd) __field(unsigned int, ctxt) @@ -120,11 +120,11 @@ TRACE_EVENT(hfi1_ctxt_info, TP_fast_assign(DD_DEV_ASSIGN(dd); __entry->ctxt = ctxt; __entry->subctxt = subctxt; - __entry->egrtids = cinfo.egrtids; - __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt; - __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize; - __entry->sdma_ring_size = cinfo.sdma_ring_size; - __entry->rcvegr_size = cinfo.rcvegr_size; + __entry->egrtids = cinfo->egrtids; + __entry->rcvhdrq_cnt = cinfo->rcvhdrq_cnt; + __entry->rcvhdrq_size = cinfo->rcvhdrq_entsize; + __entry->sdma_ring_size = cinfo->sdma_ring_size; + __entry->rcvegr_size = cinfo->rcvegr_size; ), TP_printk("[%s] ctxt %u:%u " CINFO_FMT, __get_str(dev), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 06c87f9f720c..795698925d20 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -491,7 +491,7 @@ DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node, TRACE_EVENT(f2fs_truncate_partial_nodes, - TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err), + TP_PROTO(struct inode *inode, nid_t *nid, int depth, int err), TP_ARGS(inode, nid, depth, err), diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 5152938b358d..018c81fa72fb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3137,7 +3137,7 @@ TRACE_EVENT(rdev_start_radar_detection, TRACE_EVENT(rdev_set_mcast_rate, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - int mcast_rate[NUM_NL80211_BANDS]), + int *mcast_rate), TP_ARGS(wiphy, netdev, mcast_rate), TP_STRUCT__entry( WIPHY_ENTRY diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h index ea0d486652c8..54cdd4ffa9ce 100644 --- a/sound/firewire/amdtp-stream-trace.h +++ b/sound/firewire/amdtp-stream-trace.h @@ -14,7 +14,7 @@ #include TRACE_EVENT(in_packet, - TP_PROTO(const struct amdtp_stream *s, u32 cycles, u32 cip_header[2], unsigned int payload_length, unsigned int index), + TP_PROTO(const struct amdtp_stream *s, u32 cycles, u32 *cip_header, unsigned int payload_length, unsigned int index), 
TP_ARGS(s, cycles, cip_header, payload_length, index), TP_STRUCT__entry( __field(unsigned int, second) -- cgit v1.2.3 From c4f6699dfcb8558d138fe838f741b2c10f416cf9 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 28 Mar 2018 12:05:37 -0700 Subject: bpf: introduce BPF_RAW_TRACEPOINT Introduce the BPF_PROG_TYPE_RAW_TRACEPOINT bpf program type to access kernel internal arguments of the tracepoints in their raw form. From the bpf program's point of view, the access to the arguments looks like: struct bpf_raw_tracepoint_args { __u64 args[0]; }; int bpf_prog(struct bpf_raw_tracepoint_args *ctx) { // program can read args[N] where N depends on tracepoint // and is statically verified at program load+attach time } The kprobe+bpf infrastructure allows programs to access function arguments. This feature allows programs to access raw tracepoint arguments. As with the proposed 'dynamic ftrace events', there are no ABI guarantees as to what the tracepoint arguments are or what they mean. The program needs to cast the args properly and use the bpf_probe_read() helper to access struct fields when an argument is a pointer. For every tracepoint, a __bpf_trace_##call function is prepared. In assembler it looks like: (gdb) disassemble __bpf_trace_xdp_exception Dump of assembler code for function __bpf_trace_xdp_exception: 0xffffffff81132080 <+0>: mov %ecx,%ecx 0xffffffff81132082 <+2>: jmpq 0xffffffff811231f0 where TRACE_EVENT(xdp_exception, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, u32 act), The above assembler snippet is casting the 32-bit 'act' field to 'u64' to pass into bpf_trace_run3(), while the 'dev' and 'xdp' args are passed as-is. All ~500 __bpf_trace_*() functions are only 5-10 bytes long, and in total this approach adds 7k bytes to .text. This approach gives the lowest possible overhead when calling trace_xdp_exception() from kernel C code and transitioning into bpf land. Since tracepoint+bpf are used at speeds of 1M+ events per second, this is a valuable optimization. The new BPF_RAW_TRACEPOINT_OPEN sys_bpf command is introduced that returns an anon_inode FD of a 'bpf-raw-tracepoint' object. The user space looks like: // load bpf prog with BPF_PROG_TYPE_RAW_TRACEPOINT type prog_fd = bpf_prog_load(...); // receive anon_inode fd for given bpf_raw_tracepoint with prog attached raw_tp_fd = bpf_raw_tracepoint_open("xdp_exception", prog_fd); Ctrl-C of a tracing daemon or cmdline tool that uses this feature will automatically detach the bpf program, unload it and unregister the tracepoint probe. On the kernel side, the __bpf_raw_tp_map section of pointers to tracepoint definitions and their __bpf_trace_*() probe functions is used to find the tracepoint named "xdp_exception" and the corresponding __bpf_trace_xdp_exception() probe function, which are passed to tracepoint_probe_register() to connect the probe with the tracepoint. The addition of bpf_raw_tracepoint doesn't interfere with the ftrace and perf tracepoint mechanisms. perf_event_open() can be used in parallel on the same tracepoint. Multiple bpf_raw_tracepoint_open("xdp_exception", prog_fd) calls are permitted, each with its own bpf program. The kernel will execute all tracepoint probes and all attached bpf programs. In the future, bpf_raw_tracepoints can be extended with query/introspection logic.
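For illustration, the user-space helper sketched above can be written directly against the new uapi added by this patch (a minimal sketch, not a libbpf API; prog_fd is assumed to hold a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program):

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int bpf_raw_tracepoint_open(const char *name, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.raw_tracepoint.name = (__u64)(unsigned long)name;
		attr.raw_tracepoint.prog_fd = prog_fd;

		/* Returns an anon_inode fd; closing it detaches the
		 * program and unregisters the tracepoint probe.
		 */
		return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
			       &attr, sizeof(attr));
	}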
__bpf_raw_tp_map section logic was contributed by Steven Rostedt Signed-off-by: Alexei Starovoitov Signed-off-by: Steven Rostedt (VMware) Acked-by: Steven Rostedt (VMware) Signed-off-by: Daniel Borkmann --- include/asm-generic/vmlinux.lds.h | 10 +++ include/linux/bpf_types.h | 1 + include/linux/trace_events.h | 42 +++++++++ include/linux/tracepoint-defs.h | 6 ++ include/trace/bpf_probe.h | 92 +++++++++++++++++++ include/trace/define_trace.h | 1 + include/uapi/linux/bpf.h | 11 +++ kernel/bpf/syscall.c | 78 ++++++++++++++++ kernel/trace/bpf_trace.c | 183 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 424 insertions(+) create mode 100644 include/trace/bpf_probe.h (limited to 'include/trace') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 1ab0e520d6fc..8add3493a202 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -178,6 +178,15 @@ #define TRACE_SYSCALLS() #endif +#ifdef CONFIG_BPF_EVENTS +#define BPF_RAW_TP() STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \ + KEEP(*(__bpf_raw_tp_map)) \ + VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .; +#else +#define BPF_RAW_TP() +#endif + #ifdef CONFIG_SERIAL_EARLYCON #define EARLYCON_TABLE() STRUCT_ALIGN(); \ VMLINUX_SYMBOL(__earlycon_table) = .; \ @@ -249,6 +258,7 @@ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ + BPF_RAW_TP() \ TRACEPOINT_STR() /* diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5e2e8a49fb21..6d7243bfb0ff 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -19,6 +19,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) #endif #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8a1442c4e513..b0357cd198b0 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -468,6 +468,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -487,6 +490,18 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info) { return -EOPNOTSUPP; } +static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) +{ + return -EOPNOTSUPP; +} +static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) +{ + return -EOPNOTSUPP; +} +static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) +{ + return NULL; +} #endif enum { @@ -546,6 +561,33 @@ extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); +void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); 
+void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3); +void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4); +void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5); +void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6); +void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); +void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8); +void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9); +void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10); +void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11); +void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, struct trace_event_call *call, u64 count, struct pt_regs *regs, struct hlist_head *head, diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 64ed7064f1fa..22c5a46e9693 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -35,4 +35,10 @@ struct tracepoint { struct tracepoint_func __rcu *funcs; }; +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; +} __aligned(32); + #endif diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h new file mode 100644 index 000000000000..505dae0bed80 --- /dev/null +++ b/include/trace/bpf_probe.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM_VAR + +#ifdef CONFIG_BPF_EVENTS + +#undef __entry +#define __entry entry + +#undef __get_dynamic_array +#define __get_dynamic_array(field) \ + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) + +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field) \ + ((__entry->__data_loc_##field >> 16) & 0xffff) + +#undef __get_str +#define __get_str(field) ((char *)__get_dynamic_array(field)) + +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) + +#undef __perf_count +#define __perf_count(c) (c) + +#undef __perf_task +#define __perf_task(t) (t) + +/* cast any integer, pointer, or small struct to u64 */ +#define UINTTYPE(size) \ + __typeof__(__builtin_choose_expr(size == 1, (u8)1, \ + __builtin_choose_expr(size == 2, (u16)2, \ + __builtin_choose_expr(size == 4, (u32)3, \ + __builtin_choose_expr(size == 8, (u64)4, \ + (void)5))))) +#define __CAST_TO_U64(x) ({ \ + typeof(x) __src = (x); \ + UINTTYPE(sizeof(x)) __dst; \ + memcpy(&__dst, &__src, sizeof(__dst)); \ + (u64)__dst; }) + +#define __CAST1(a,...) __CAST_TO_U64(a) +#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__) +#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__) +#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__) +#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__) +#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__) +#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__) +#define __CAST8(a,...) 
__CAST_TO_U64(a), __CAST7(__VA_ARGS__) +#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__) +#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__) +#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__) +#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__) +/* tracepoints with more than 12 arguments will hit build error */ +#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace void \ +__bpf_trace_##call(void *__data, proto) \ +{ \ + struct bpf_prog *prog = __data; \ + CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \ +} + +/* + * This part is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the + * bpf probe will fail to compile unless it too is updated. + */ +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ +static inline void bpf_test_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(__bpf_trace_##template); \ +} \ +static struct bpf_raw_event_map __used \ + __attribute__((section("__bpf_raw_tp_map"))) \ +__bpf_trace_tp_map_##call = { \ + .tp = &__tracepoint_##call, \ + .bpf_func = (void *)__bpf_trace_##template, \ + .num_args = COUNT_ARGS(args), \ +}; + + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#endif /* CONFIG_BPF_EVENTS */ diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index d9e3d4aa3f6e..cb30c5532144 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -95,6 +95,7 @@ #ifdef TRACEPOINTS_ENABLED #include #include +#include #endif #undef TRACE_EVENT diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 18b7c510c511..1878201c2d77 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -94,6 +94,7 @@ enum bpf_cmd { BPF_MAP_GET_FD_BY_ID, BPF_OBJ_GET_INFO_BY_FD, BPF_PROG_QUERY, + BPF_RAW_TRACEPOINT_OPEN, }; enum bpf_map_type { @@ -134,6 +135,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_SK_SKB, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_PROG_TYPE_SK_MSG, + BPF_PROG_TYPE_RAW_TRACEPOINT, }; enum bpf_attach_type { @@ -344,6 +346,11 @@ union bpf_attr { __aligned_u64 prog_ids; __u32 prog_cnt; } query; + + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -1152,4 +1159,8 @@ struct bpf_cgroup_dev_ctx { __u32 minor; }; +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 77d45bd9f507..95ca2523fa6e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1315,6 +1315,81 @@ static int bpf_obj_get(const union bpf_attr *attr) attr->file_flags); } +struct bpf_raw_tracepoint { + struct bpf_raw_event_map *btp; + struct bpf_prog *prog; +}; + +static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) +{ + struct bpf_raw_tracepoint *raw_tp = filp->private_data; + + if (raw_tp->prog) { + bpf_probe_unregister(raw_tp->btp, raw_tp->prog); + bpf_prog_put(raw_tp->prog); + } + kfree(raw_tp); + return 0; +} + +static const struct file_operations bpf_raw_tp_fops = { + .release = bpf_raw_tracepoint_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, +}; + +#define 
BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd + +static int bpf_raw_tracepoint_open(const union bpf_attr *attr) +{ + struct bpf_raw_tracepoint *raw_tp; + struct bpf_raw_event_map *btp; + struct bpf_prog *prog; + char tp_name[128]; + int tp_fd, err; + + if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name), + sizeof(tp_name) - 1) < 0) + return -EFAULT; + tp_name[sizeof(tp_name) - 1] = 0; + + btp = bpf_find_raw_tracepoint(tp_name); + if (!btp) + return -ENOENT; + + raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); + if (!raw_tp) + return -ENOMEM; + raw_tp->btp = btp; + + prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd, + BPF_PROG_TYPE_RAW_TRACEPOINT); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto out_free_tp; + } + + err = bpf_probe_register(raw_tp->btp, prog); + if (err) + goto out_put_prog; + + raw_tp->prog = prog; + tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, + O_CLOEXEC); + if (tp_fd < 0) { + bpf_probe_unregister(raw_tp->btp, prog); + err = tp_fd; + goto out_put_prog; + } + return tp_fd; + +out_put_prog: + bpf_prog_put(prog); +out_free_tp: + kfree(raw_tp); + return err; +} + #ifdef CONFIG_CGROUP_BPF #define BPF_PROG_ATTACH_LAST_FIELD attach_flags @@ -1925,6 +2000,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_OBJ_GET_INFO_BY_FD: err = bpf_obj_get_info_by_fd(&attr, uattr); break; + case BPF_RAW_TRACEPOINT_OPEN: + err = bpf_raw_tracepoint_open(&attr); + break; default: err = -EINVAL; break; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 7f9691c86b6e..463e72d18c4c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -735,6 +735,86 @@ static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id) } } +/* + * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp + * to avoid potential recursive reuse issue when/if tracepoints are added + * inside bpf_*_event_output and/or bpf_get_stack_id + */ +static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs); +BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, + struct bpf_map *, map, u64, flags, void *, data, u64, size) +{ + struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); + + perf_fetch_caller_regs(regs); + return ____bpf_perf_event_output(regs, map, flags, data, size); +} + +static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { + .func = bpf_perf_event_output_raw_tp, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, +}; + +BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, + struct bpf_map *, map, u64, flags) +{ + struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs); + + perf_fetch_caller_regs(regs); + /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ + return bpf_get_stackid((unsigned long) regs, (unsigned long) map, + flags, 0, 0); +} + +static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { + .func = bpf_get_stackid_raw_tp, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto *raw_tp_prog_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { + case BPF_FUNC_perf_event_output: + return &bpf_perf_event_output_proto_raw_tp; + case BPF_FUNC_get_stackid: + 
return &bpf_get_stackid_proto_raw_tp; + default: + return tracing_func_proto(func_id); + } +} + +static bool raw_tp_prog_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + /* largest tracepoint in the kernel has 12 args */ + if (off < 0 || off >= sizeof(__u64) * 12) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + return true; +} + +const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { + .get_func_proto = raw_tp_prog_func_proto, + .is_valid_access = raw_tp_prog_is_valid_access, +}; + +const struct bpf_prog_ops raw_tracepoint_prog_ops = { +}; + static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { @@ -908,3 +988,106 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) return ret; } + +extern struct bpf_raw_event_map __start__bpf_raw_tp[]; +extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; + +struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) +{ + struct bpf_raw_event_map *btp = __start__bpf_raw_tp; + + for (; btp < __stop__bpf_raw_tp; btp++) { + if (!strcmp(btp->tp->name, name)) + return btp; + } + return NULL; +} + +static __always_inline +void __bpf_trace_run(struct bpf_prog *prog, u64 *args) +{ + rcu_read_lock(); + preempt_disable(); + (void) BPF_PROG_RUN(prog, args); + preempt_enable(); + rcu_read_unlock(); +} + +#define UNPACK(...) __VA_ARGS__ +#define REPEAT_1(FN, DL, X, ...) FN(X) +#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) +#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) +#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) +#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) +#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) +#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) +#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) +#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) +#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) +#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) +#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) +#define REPEAT(X, FN, DL, ...) 
REPEAT_##X(FN, DL, __VA_ARGS__) + +#define SARG(X) u64 arg##X +#define COPY(X) args[X] = arg##X + +#define __DL_COM (,) +#define __DL_SEM (;) + +#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 + +#define BPF_TRACE_DEFN_x(x) \ + void bpf_trace_run##x(struct bpf_prog *prog, \ + REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ + { \ + u64 args[x]; \ + REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ + __bpf_trace_run(prog, args); \ + } \ + EXPORT_SYMBOL_GPL(bpf_trace_run##x) +BPF_TRACE_DEFN_x(1); +BPF_TRACE_DEFN_x(2); +BPF_TRACE_DEFN_x(3); +BPF_TRACE_DEFN_x(4); +BPF_TRACE_DEFN_x(5); +BPF_TRACE_DEFN_x(6); +BPF_TRACE_DEFN_x(7); +BPF_TRACE_DEFN_x(8); +BPF_TRACE_DEFN_x(9); +BPF_TRACE_DEFN_x(10); +BPF_TRACE_DEFN_x(11); +BPF_TRACE_DEFN_x(12); + +static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) +{ + struct tracepoint *tp = btp->tp; + + /* + * check that program doesn't access arguments beyond what's + * available in this tracepoint + */ + if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) + return -EINVAL; + + return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); +} + +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) +{ + int err; + + mutex_lock(&bpf_event_mutex); + err = __bpf_probe_register(btp, prog); + mutex_unlock(&bpf_event_mutex); + return err; +} + +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) +{ + int err; + + mutex_lock(&bpf_event_mutex); + err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); + mutex_unlock(&bpf_event_mutex); + return err; +} -- cgit v1.2.3 From 09d2bf595db4b4075ea721acd61e180d6bb18f88 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 30 Mar 2018 21:05:28 +0100 Subject: rxrpc: Add a tracepoint to track rxrpc_local refcounting Add a tracepoint to track reference counting on the rxrpc_local struct. 
Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 43 +++++++++++++++++++++++++++++ net/rxrpc/ar-internal.h | 27 +++--------------- net/rxrpc/call_accept.c | 3 +- net/rxrpc/local_object.c | 65 ++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 111 insertions(+), 27 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 2ea788f6f95d..0410dfeb79c6 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -42,6 +42,14 @@ enum rxrpc_skb_trace { rxrpc_skb_tx_seen, }; +enum rxrpc_local_trace { + rxrpc_local_got, + rxrpc_local_new, + rxrpc_local_processing, + rxrpc_local_put, + rxrpc_local_queued, +}; + enum rxrpc_conn_trace { rxrpc_conn_got, rxrpc_conn_new_client, @@ -215,6 +223,13 @@ enum rxrpc_congest_change { EM(rxrpc_skb_tx_rotated, "Tx ROT") \ E_(rxrpc_skb_tx_seen, "Tx SEE") +#define rxrpc_local_traces \ + EM(rxrpc_local_got, "GOT") \ + EM(rxrpc_local_new, "NEW") \ + EM(rxrpc_local_processing, "PRO") \ + EM(rxrpc_local_put, "PUT") \ + E_(rxrpc_local_queued, "QUE") + #define rxrpc_conn_traces \ EM(rxrpc_conn_got, "GOT") \ EM(rxrpc_conn_new_client, "NWc") \ @@ -416,6 +431,7 @@ enum rxrpc_congest_change { #define E_(a, b) TRACE_DEFINE_ENUM(a); rxrpc_skb_traces; +rxrpc_local_traces; rxrpc_conn_traces; rxrpc_client_traces; rxrpc_call_traces; @@ -439,6 +455,33 @@ rxrpc_congest_changes; #define EM(a, b) { a, b }, #define E_(a, b) { a, b } +TRACE_EVENT(rxrpc_local, + TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, + int usage, const void *where), + + TP_ARGS(local, op, usage, where), + + TP_STRUCT__entry( + __field(unsigned int, local ) + __field(int, op ) + __field(int, usage ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->local = local->debug_id; + __entry->op = op; + __entry->usage = usage; + __entry->where = where; + ), + + TP_printk("L=%08x %s u=%d sp=%pSR", + __entry->local, + __print_symbolic(__entry->op, rxrpc_local_traces), + __entry->usage, + __entry->where) + ); + TRACE_EVENT(rxrpc_conn, TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, int usage, const void *where), diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 2a2b0fdfb157..cc51d3eb0548 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -981,31 +981,12 @@ extern void rxrpc_process_local_events(struct rxrpc_local *); * local_object.c */ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *); -void __rxrpc_put_local(struct rxrpc_local *); +struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *); +struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *); +void rxrpc_put_local(struct rxrpc_local *); +void rxrpc_queue_local(struct rxrpc_local *); void rxrpc_destroy_all_locals(struct rxrpc_net *); -static inline void rxrpc_get_local(struct rxrpc_local *local) -{ - atomic_inc(&local->usage); -} - -static inline -struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) -{ - return atomic_inc_not_zero(&local->usage) ? 
local : NULL; -} - -static inline void rxrpc_put_local(struct rxrpc_local *local) -{ - if (local && atomic_dec_and_test(&local->usage)) - __rxrpc_put_local(local); -} - -static inline void rxrpc_queue_local(struct rxrpc_local *local) -{ - rxrpc_queue_work(&local->processor); -} - /* * misc.c */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 493545033e42..5a9b1d916124 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -296,8 +296,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, b->conn_backlog[conn_tail] = NULL; smp_store_release(&b->conn_backlog_tail, (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1)); - rxrpc_get_local(local); - conn->params.local = local; + conn->params.local = rxrpc_get_local(local); conn->params.peer = peer; rxrpc_see_connection(conn); rxrpc_new_incoming_connection(rx, conn, skb); diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 38b99db30e54..8b54e9531d52 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -95,6 +95,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); local->srx.srx_service = 0; + trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); } _leave(" = %p", local); @@ -256,15 +257,74 @@ addr_in_use: return ERR_PTR(-EADDRINUSE); } +/* + * Get a ref on a local endpoint. + */ +struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) +{ + const void *here = __builtin_return_address(0); + int n; + + n = atomic_inc_return(&local->usage); + trace_rxrpc_local(local, rxrpc_local_got, n, here); + return local; +} + +/* + * Get a ref on a local endpoint unless its usage has already reached 0. + */ +struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) +{ + const void *here = __builtin_return_address(0); + + if (local) { + int n = __atomic_add_unless(&local->usage, 1, 0); + if (n > 0) + trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); + else + local = NULL; + } + return local; +} + +/* + * Queue a local endpoint. + */ +void rxrpc_queue_local(struct rxrpc_local *local) +{ + const void *here = __builtin_return_address(0); + + if (rxrpc_queue_work(&local->processor)) + trace_rxrpc_local(local, rxrpc_local_queued, + atomic_read(&local->usage), here); +} + /* * A local endpoint reached its end of life. */ -void __rxrpc_put_local(struct rxrpc_local *local) +static void __rxrpc_put_local(struct rxrpc_local *local) { _enter("%d", local->debug_id); rxrpc_queue_work(&local->processor); } +/* + * Drop a ref on a local endpoint. + */ +void rxrpc_put_local(struct rxrpc_local *local) +{ + const void *here = __builtin_return_address(0); + int n; + + if (local) { + n = atomic_dec_return(&local->usage); + trace_rxrpc_local(local, rxrpc_local_put, n, here); + + if (n == 0) + __rxrpc_put_local(local); + } +} + /* * Destroy a local endpoint's socket and then hand the record to RCU to dispose * of. 
@@ -322,7 +382,8 @@ static void rxrpc_local_processor(struct work_struct *work) container_of(work, struct rxrpc_local, processor); bool again; - _enter("%d", local->debug_id); + trace_rxrpc_local(local, rxrpc_local_processing, + atomic_read(&local->usage), NULL); do { again = false; -- cgit v1.2.3 From 1159d4b496f57d5b8ee27c8b90b9d01c332e2e11 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 30 Mar 2018 21:05:38 +0100 Subject: rxrpc: Add a tracepoint to track rxrpc_peer refcounting Add a tracepoint to track reference counting on the rxrpc_peer struct. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 42 ++++++++++++++++++++++++++++ net/rxrpc/ar-internal.h | 23 +++------------- net/rxrpc/peer_event.c | 2 +- net/rxrpc/peer_object.c | 65 ++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 110 insertions(+), 22 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 0410dfeb79c6..9e96c2fe2793 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -50,6 +50,14 @@ enum rxrpc_local_trace { rxrpc_local_queued, }; +enum rxrpc_peer_trace { + rxrpc_peer_got, + rxrpc_peer_new, + rxrpc_peer_processing, + rxrpc_peer_put, + rxrpc_peer_queued_error, +}; + enum rxrpc_conn_trace { rxrpc_conn_got, rxrpc_conn_new_client, @@ -230,6 +238,13 @@ enum rxrpc_congest_change { EM(rxrpc_local_put, "PUT") \ E_(rxrpc_local_queued, "QUE") +#define rxrpc_peer_traces \ + EM(rxrpc_peer_got, "GOT") \ + EM(rxrpc_peer_new, "NEW") \ + EM(rxrpc_peer_processing, "PRO") \ + EM(rxrpc_peer_put, "PUT") \ + E_(rxrpc_peer_queued_error, "QER") + #define rxrpc_conn_traces \ EM(rxrpc_conn_got, "GOT") \ EM(rxrpc_conn_new_client, "NWc") \ @@ -482,6 +497,33 @@ TRACE_EVENT(rxrpc_local, __entry->where) ); +TRACE_EVENT(rxrpc_peer, + TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op, + int usage, const void *where), + + TP_ARGS(peer, op, usage, where), + + TP_STRUCT__entry( + __field(unsigned int, peer ) + __field(int, op ) + __field(int, usage ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->peer = peer->debug_id; + __entry->op = op; + __entry->usage = usage; + __entry->where = where; + ), + + TP_printk("P=%08x %s u=%d sp=%pSR", + __entry->peer, + __print_symbolic(__entry->op, rxrpc_peer_traces), + __entry->usage, + __entry->where) + ); + TRACE_EVENT(rxrpc_conn, TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, int usage, const void *where), diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index d40d54b78567..c46583bc255d 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -1041,25 +1041,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *); - -static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) -{ - atomic_inc(&peer->usage); - return peer; -} - -static inline -struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) -{ - return atomic_inc_not_zero(&peer->usage) ? 
peer : NULL; -} - -extern void __rxrpc_put_peer(struct rxrpc_peer *peer); -static inline void rxrpc_put_peer(struct rxrpc_peer *peer) -{ - if (peer && atomic_dec_and_test(&peer->usage)) - __rxrpc_put_peer(peer); -} +struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); +struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); +void rxrpc_put_peer(struct rxrpc_peer *); +void __rxrpc_queue_peer_error(struct rxrpc_peer *); /* * proc.c diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index d01eb9a06448..78c2f95d1f22 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -192,7 +192,7 @@ void rxrpc_error_report(struct sock *sk) rxrpc_free_skb(skb, rxrpc_skb_rx_freed); /* The ref we obtained is passed off to the work item */ - rxrpc_queue_work(&peer->error_distributor); + __rxrpc_queue_peer_error(peer); _leave(""); } diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 94a6dbfcf129..a4a750aea1e5 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -386,9 +386,54 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, } /* - * Discard a ref on a remote peer record. + * Get a ref on a peer record. */ -void __rxrpc_put_peer(struct rxrpc_peer *peer) +struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) +{ + const void *here = __builtin_return_address(0); + int n; + + n = atomic_inc_return(&peer->usage); + trace_rxrpc_peer(peer, rxrpc_peer_got, n, here); + return peer; +} + +/* + * Get a ref on a peer record unless its usage has already reached 0. + */ +struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) +{ + const void *here = __builtin_return_address(0); + + if (peer) { + int n = __atomic_add_unless(&peer->usage, 1, 0); + if (n > 0) + trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here); + else + peer = NULL; + } + return peer; +} + +/* + * Queue a peer record. This passes the caller's ref to the workqueue. + */ +void __rxrpc_queue_peer_error(struct rxrpc_peer *peer) +{ + const void *here = __builtin_return_address(0); + int n; + + n = atomic_read(&peer->usage); + if (rxrpc_queue_work(&peer->error_distributor)) + trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here); + else + rxrpc_put_peer(peer); +} + +/* + * Discard a peer record. + */ +static void __rxrpc_put_peer(struct rxrpc_peer *peer) { struct rxrpc_net *rxnet = peer->local->rxnet; @@ -402,6 +447,22 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer) kfree_rcu(peer, rcu); } +/* + * Drop a ref on a peer record. + */ +void rxrpc_put_peer(struct rxrpc_peer *peer) +{ + const void *here = __builtin_return_address(0); + int n; + + if (peer) { + n = atomic_dec_return(&peer->usage); + trace_rxrpc_peer(peer, rxrpc_peer_put, n, here); + if (n == 0) + __rxrpc_put_peer(peer); + } +} + /** * rxrpc_kernel_get_peer - Get the peer address of a call * @sock: The socket on which the call is in progress. -- cgit v1.2.3
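A note on the __CAST_TO_U64()/UINTTYPE() machinery in the bpf_probe.h patch above: __builtin_choose_expr() selects an unsigned integer type of exactly the argument's size, and memcpy() then moves the bits across, so integers, pointers and small structs all become a well-defined u64 without strict-aliasing or sign-extension surprises; anything of an unsupported size lands on the (void) arm and breaks the build, by design. The following stand-alone sketch reproduces the trick in user space — it substitutes uint8_t/uint16_t/uint32_t/uint64_t for the kernel's u8/u16/u32/u64 typedefs, and struct two_bytes is an invented example type, not anything from the patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space rendition of the kernel macros: pick an unsigned type of
 * matching width, then copy the bits across.  An unsupported size hits
 * the (void)5 arm and fails to compile, mirroring the kernel's
 * build-time check. */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1, (uint8_t)1, \
		   __builtin_choose_expr(size == 2, (uint16_t)2, \
		   __builtin_choose_expr(size == 4, (uint32_t)3, \
		   __builtin_choose_expr(size == 8, (uint64_t)4, \
					 (void)5)))))
#define __CAST_TO_U64(x) ({ \
	__typeof__(x) __src = (x); \
	UINTTYPE(sizeof(x)) __dst; \
	memcpy(&__dst, &__src, sizeof(__dst)); \
	(uint64_t)__dst; })

struct two_bytes { uint8_t a, b; };	/* 2-byte struct -> cast via uint16_t */

int main(void)
{
	int i = -1;
	void *p = &i;
	struct two_bytes s = { 0x12, 0x34 };

	/* -1 comes out zero-extended as 0xffffffff, not sign-extended:
	 * the bits are copied at the value's own width first. */
	printf("int    -> %#llx\n", (unsigned long long)__CAST_TO_U64(i));
	printf("ptr    -> %#llx\n", (unsigned long long)__CAST_TO_U64(p));
	printf("struct -> %#llx\n", (unsigned long long)__CAST_TO_U64(s));
	return 0;
}

Built with gcc (the statement expression and __builtin_choose_expr() are GNU C extensions), this prints the raw bit pattern of each value widened to a u64, which is exactly the form bpf_trace_run1()..bpf_trace_run12() expect their arguments in.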
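From user space, the new BPF_RAW_TRACEPOINT_OPEN command is driven like any other bpf(2) command: fill in the raw_tracepoint member of union bpf_attr with a pointer to the tracepoint name and the fd of an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT program. A minimal sketch of such a wrapper follows; it assumes kernel headers that carry this patch's uapi additions, leaves the loading of the program itself out, and its name is chosen to echo the kernel-side function rather than any existing library API:

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Attach an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT program to the
 * named tracepoint.  Returns a new fd on success; the attachment lasts
 * until that fd is closed, at which point bpf_raw_tracepoint_release()
 * unregisters the probe and drops the prog reference. */
static int raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (uint64_t)(unsigned long)name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}

A failed call returns -1 with errno set from the codes in bpf_raw_tracepoint_open() above: ENOENT if bpf_find_raw_tracepoint() knows no tracepoint of that name, EINVAL if the verifier recorded a max_ctx_offset beyond num_args * 8 for that tracepoint, EFAULT for an unreadable name pointer.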
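The two rxrpc patches above de-inline their get/put helpers for the same reason: every reference-count transition can then be reported through a tracepoint together with the object's debug_id, the resulting count and the caller's address from __builtin_return_address(0). Stripped of the rxrpc specifics, the shape of the pattern is sketched below as a runnable user-space analogue — obj, obj_get(), obj_get_maybe(), obj_put() and trace_ref() are invented stand-ins (C11 atomics for the kernel's atomic_t, a printf for the tracepoint), and where rxrpc hands a dead object to its processor work item, the sketch simply frees it:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a traced kernel object such as rxrpc_local or rxrpc_peer. */
struct obj {
	unsigned int debug_id;
	atomic_int usage;
};

/* Stand-in for trace_rxrpc_local()/trace_rxrpc_peer(). */
static void trace_ref(const struct obj *o, const char *op, int n,
		      const void *where)
{
	printf("O=%08x %s u=%d sp=%p\n", o->debug_id, op, n, where);
}

/* Unconditionally take a ref, like rxrpc_get_local()/rxrpc_get_peer(). */
static struct obj *obj_get(struct obj *o)
{
	int n = atomic_fetch_add(&o->usage, 1) + 1;

	trace_ref(o, "GOT", n, __builtin_return_address(0));
	return o;
}

/* Take a ref only if the count hasn't already hit zero, like the
 * _maybe() variants built on __atomic_add_unless(). */
static struct obj *obj_get_maybe(struct obj *o)
{
	int n = atomic_load(&o->usage);

	do {
		if (n == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&o->usage, &n, n + 1));
	trace_ref(o, "GOT", n + 1, __builtin_return_address(0));
	return o;
}

/* Drop a ref; the last put releases the object.  (rxrpc instead queues
 * the object's processor to let it die asynchronously.) */
static void obj_put(struct obj *o)
{
	if (o) {
		int n = atomic_fetch_sub(&o->usage, 1) - 1;

		trace_ref(o, "PUT", n, __builtin_return_address(0));
		if (n == 0)
			free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->debug_id = 1;
	atomic_init(&o->usage, 1);	/* new objects start at one ref */
	trace_ref(o, "NEW", 1, NULL);

	obj_get(o);
	obj_put(o);
	if (obj_get_maybe(o))		/* still alive: ref taken */
		obj_put(o);
	obj_put(o);			/* count hits 0: released */
	return 0;
}

The "maybe" variant is the important half of the idiom: it refuses to resurrect an object whose count has already reached zero, which is what lets lookups race safely against the final put.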