author | Chuck Lever <chuck.lever@oracle.com> | 2020-06-22 17:15:41 +0300
---|---|---
committer | Chuck Lever <chuck.lever@oracle.com> | 2020-11-30 21:00:22 +0300
commit | 78147ca8b4a9b6cf0e597ddd6bf17959e08376c2 (patch) |
tree | 9222da84a51b34258996dafeb3a2774a96122a39 | /include/trace
parent | ded380f10072c924a17be6ac996019ff6472c9d2 (diff) |
download | linux-78147ca8b4a9b6cf0e597ddd6bf17959e08376c2.tar.xz |
svcrdma: Add a "parsed chunk list" data structure
This simple data structure binds the location of each data payload
inside an RPC message to the chunk that will be used to push it
to or pull it from the client.
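For orientation, the shape implied by the fields the new tracepoints
record is roughly the following. This is a minimal sketch: only the
member names that appear in the diff (rs_handle/rs_length/rs_offset,
ch_position/ch_segcount/ch_segments[]) are grounded in this patch;
the list linkage and exact member set are assumptions.

```c
/* Minimal sketch of the parsed chunk list types, inferred from the
 * tracepoint fields in this patch; the real definitions live in the
 * svcrdma headers and may differ.
 */
struct svc_rdma_segment {
	u32	rs_handle;	/* RDMA handle (R_key) for this segment */
	u32	rs_length;	/* number of payload bytes */
	u64	rs_offset;	/* remote offset of the segment */
};

struct svc_rdma_chunk {
	struct list_head	ch_list;	/* assumed: chunk list linkage */
	u32			ch_position;	/* XDR position of the payload */
	u32			ch_segcount;	/* entries in ch_segments[] */
	struct svc_rdma_segment	ch_segments[];	/* this chunk's segments */
};
```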
There are several benefits to this small additional overhead:
* It enables support for more than one chunk in incoming Read and
Write lists.
* It translates the version-specific on-the-wire format into a
generic in-memory structure, enabling support for multiple
versions of the RPC/RDMA transport protocol.
* It enables the server to re-organize a chunk list if it needs to
adjust where Read chunk data lands in server memory without
altering the contents of the XDR-encoded Receive buffer.
Construction of these lists is done while sanity checking each
incoming RPC/RDMA header. Subsequent patches will make use of the
generated data structures.
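To make the construction step concrete, here is a hedged sketch of a
Read-list parse loop, assuming the RFC 8166 wire format (per-item
discriminator, position, handle, length, 64-bit offset) and decoding
via the sunrpc xdr_stream helpers; svc_rdma_parse_read_list() and
svc_rdma_chunk_add_segment() are hypothetical names, not the
functions this series actually adds.

```c
#include <linux/sunrpc/xdr.h>

/* Hypothetical helper (assumption): appends a decoded segment to a
 * chunk on @chunks, starting a new chunk when @position changes. */
bool svc_rdma_chunk_add_segment(struct list_head *chunks, u32 position,
				u32 handle, u32 length, u64 offset);

static bool svc_rdma_parse_read_list(struct xdr_stream *xdr,
				     struct list_head *chunks)
{
	u32 discrim, position, handle, length;
	u64 offset;

	/* Each Read list item is prefixed by a boolean discriminator;
	 * FALSE terminates the list. */
	while (xdr_stream_decode_u32(xdr, &discrim) == 0 && discrim) {
		if (xdr_stream_decode_u32(xdr, &position) < 0 ||
		    xdr_stream_decode_u32(xdr, &handle) < 0 ||
		    xdr_stream_decode_u32(xdr, &length) < 0 ||
		    xdr_stream_decode_u64(xdr, &offset) < 0)
			return false;		/* malformed header */

		/* Segments sharing a position land in one chunk. */
		if (!svc_rdma_chunk_add_segment(chunks, position,
						handle, length, offset))
			return false;		/* allocation failure */
	}
	return true;
}
```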
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Diffstat (limited to 'include/trace')
-rw-r--r-- | include/trace/events/rpcrdma.h | 75 |
1 file changed, 73 insertions(+), 2 deletions(-)
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index bf1065772228..72b941aef43b 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1446,12 +1446,83 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event,
 	),					\
 	TP_ARGS(handle, length, offset))
 
-DEFINE_SEGMENT_EVENT(decode_wseg);
-DEFINE_SEGMENT_EVENT(encode_rseg);
 DEFINE_SEGMENT_EVENT(send_rseg);
 DEFINE_SEGMENT_EVENT(encode_wseg);
 DEFINE_SEGMENT_EVENT(send_wseg);
 
+TRACE_EVENT(svcrdma_decode_rseg,
+	TP_PROTO(
+		const struct rpc_rdma_cid *cid,
+		const struct svc_rdma_chunk *chunk,
+		const struct svc_rdma_segment *segment
+	),
+
+	TP_ARGS(cid, chunk, segment),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(int, completion_id)
+		__field(u32, segno)
+		__field(u32, position)
+		__field(u32, handle)
+		__field(u32, length)
+		__field(u64, offset)
+	),
+
+	TP_fast_assign(
+		__entry->cq_id = cid->ci_queue_id;
+		__entry->completion_id = cid->ci_completion_id;
+		__entry->segno = chunk->ch_segcount;
+		__entry->position = chunk->ch_position;
+		__entry->handle = segment->rs_handle;
+		__entry->length = segment->rs_length;
+		__entry->offset = segment->rs_offset;
+	),
+
+	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+		__entry->cq_id, __entry->completion_id,
+		__entry->segno, __entry->position, __entry->length,
+		(unsigned long long)__entry->offset, __entry->handle
+	)
+);
+
+TRACE_EVENT(svcrdma_decode_wseg,
+	TP_PROTO(
+		const struct rpc_rdma_cid *cid,
+		const struct svc_rdma_chunk *chunk,
+		u32 segno
+	),
+
+	TP_ARGS(cid, chunk, segno),
+
+	TP_STRUCT__entry(
+		__field(u32, cq_id)
+		__field(int, completion_id)
+		__field(u32, segno)
+		__field(u32, handle)
+		__field(u32, length)
+		__field(u64, offset)
+	),
+
+	TP_fast_assign(
+		const struct svc_rdma_segment *segment =
+			&chunk->ch_segments[segno];
+
+		__entry->cq_id = cid->ci_queue_id;
+		__entry->completion_id = cid->ci_completion_id;
+		__entry->segno = segno;
+		__entry->handle = segment->rs_handle;
+		__entry->length = segment->rs_length;
+		__entry->offset = segment->rs_offset;
+	),
+
+	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+		__entry->cq_id, __entry->completion_id,
+		__entry->segno, __entry->length,
+		(unsigned long long)__entry->offset, __entry->handle
+	)
+);
+
 DECLARE_EVENT_CLASS(svcrdma_chunk_event,
 	TP_PROTO(
 		u32 length
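A note on usage: TRACE_EVENT() generates a trace_<name>() static
inline for each event, so once a later patch wires up the parser,
recording a decoded Read segment looks roughly like the call below.
The variable names are placeholders, and the sample output line is
invented purely to illustrate the TP_printk format.

```c
/* Illustrative call site (names are placeholders): TRACE_EVENT()
 * generates trace_svcrdma_decode_rseg() with the TP_PROTO signature
 * shown in the diff above.
 */
trace_svcrdma_decode_rseg(cid, chunk, segment);

/* When the event is enabled via tracefs, each hit renders through
 * TP_printk, e.g. (values invented):
 *
 *   cq_id=8 cid=203 segno=2 position=0 4096@0x0000000012345678:0x0000abcd
 */
```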