author     David Howells <dhowells@redhat.com>      2024-12-16 23:40:54 +0300
committer  Christian Brauner <brauner@kernel.org>   2024-12-21 00:34:02 +0300
commit     aabcabf2746062253565b33aa3f8d25999a5ac01 (patch)
tree       c6471f14ba0a7f7d44a9d0e721949db33ddd5471 /fs/netfs
parent     eb1181594417dafad0f75808ead71f6d5170c1ea (diff)
download   linux-aabcabf2746062253565b33aa3f8d25999a5ac01.tar.xz
netfs: Add a tracepoint to log the lifespan of folio_queue structs
Add a tracepoint to log the lifespan of folio_queue structs. For tracing
purposes, each folio_queue is tagged with the debug ID of whatever it is
related to (typically a netfs_io_request) and with a debug ID of its own.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-5-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
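Only the fs/netfs callers are shown below; the trace event itself and the extra
folio_queue fields are added outside this directory. As orientation, the new
trace_netfs_folioq() event presumably looks roughly like the sketch that
follows. The field names (rreq_id, debug_id) and the netfs_folioq_traces
symbol table are inferred from the callers in this diff, not copied from
include/trace/events/netfs.h:

	/*
	 * Hypothetical sketch of the netfs_folioq trace event, assuming it
	 * follows the usual TRACE_EVENT() pattern used by the other netfs
	 * events. Field names and the symbol table are inferred.
	 */
	TRACE_EVENT(netfs_folioq,
		    TP_PROTO(const struct folio_queue *fq,
			     enum netfs_folioq_trace trace),

		    TP_ARGS(fq, trace),

		    TP_STRUCT__entry(
			    __field(unsigned int, rreq)  /* debug ID of the owning request */
			    __field(unsigned int, id)    /* the folio_queue's own debug ID */
			    __field(unsigned int, trace) /* why the event fired */
			    ),

		    TP_fast_assign(
			    __entry->rreq  = fq ? fq->rreq_id : 0;
			    __entry->id    = fq ? fq->debug_id : 0;
			    __entry->trace = trace;
			    ),

		    TP_printk("R=%08x fq=%x %s",
			      __entry->rreq, __entry->id,
			      __print_symbolic(__entry->trace, netfs_folioq_traces))
		    );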
Diffstat (limited to 'fs/netfs')
 -rw-r--r--  fs/netfs/buffered_read.c  10
 -rw-r--r--  fs/netfs/internal.h        3
 -rw-r--r--  fs/netfs/misc.c           31
 -rw-r--r--  fs/netfs/read_collect.c    8
 -rw-r--r--  fs/netfs/write_issue.c     2
 5 files changed, 36 insertions, 18 deletions
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 78b04763bed6..7ec04d5628d8 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -131,7 +131,8 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
struct folio_queue *tail = rreq->buffer_tail, *new;
size_t added;
- new = netfs_folioq_alloc(GFP_NOFS);
+ new = netfs_folioq_alloc(rreq->debug_id, GFP_NOFS,
+ netfs_trace_folioq_alloc_read_prep);
if (!new)
return -ENOMEM;
new->prev = tail;
@@ -361,9 +362,11 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
struct folio_batch put_batch;
size_t added;
- folioq = netfs_folioq_alloc(GFP_KERNEL);
+ folioq = netfs_folioq_alloc(rreq->debug_id, GFP_KERNEL,
+ netfs_trace_folioq_alloc_read_prime);
if (!folioq)
return -ENOMEM;
+
rreq->buffer = folioq;
rreq->buffer_tail = folioq;
rreq->submitted = rreq->start;
@@ -436,7 +439,8 @@ static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct fo
{
struct folio_queue *folioq;
- folioq = netfs_folioq_alloc(GFP_KERNEL);
+ folioq = netfs_folioq_alloc(rreq->debug_id, GFP_KERNEL,
+ netfs_trace_folioq_alloc_read_sing);
if (!folioq)
return -ENOMEM;
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index c562aec3b483..01b013f558f7 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -58,7 +58,8 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
+ enum netfs_folioq_trace trace);
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
bool needs_put);
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 6cd7e1ee7a14..afe032551de5 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -10,18 +10,25 @@
/**
* netfs_folioq_alloc - Allocate a folio_queue struct
+ * @rreq_id: Associated debugging ID for tracing purposes
* @gfp: Allocation constraints
+ * @trace: Trace tag to indicate the purpose of the allocation
*
- * Allocate, initialise and account the folio_queue struct.
+ * Allocate, initialise and account the folio_queue struct and log a trace line
+ * to mark the allocation.
*/
-struct folio_queue *netfs_folioq_alloc(gfp_t gfp)
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int /*enum netfs_folioq_trace*/ trace)
{
+ static atomic_t debug_ids;
struct folio_queue *fq;
fq = kmalloc(sizeof(*fq), gfp);
if (fq) {
netfs_stat(&netfs_n_folioq);
- folioq_init(fq);
+ folioq_init(fq, rreq_id);
+ fq->debug_id = atomic_inc_return(&debug_ids);
+ trace_netfs_folioq(fq, trace);
}
return fq;
}
@@ -30,11 +37,14 @@ EXPORT_SYMBOL(netfs_folioq_alloc);
/**
* netfs_folioq_free - Free a folio_queue struct
* @folioq: The object to free
+ * @trace: Trace tag to indicate which free
*
* Free and unaccount the folio_queue struct.
*/
-void netfs_folioq_free(struct folio_queue *folioq)
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int /*enum netfs_trace_folioq*/ trace)
{
+ trace_netfs_folioq(folioq, trace);
netfs_stat_d(&netfs_n_folioq);
kfree(folioq);
}
@@ -43,7 +53,8 @@ EXPORT_SYMBOL(netfs_folioq_free);
/*
* Make sure there's space in the rolling queue.
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
+ enum netfs_folioq_trace trace)
{
struct folio_queue *tail = rreq->buffer_tail, *prev;
unsigned int prev_nr_slots = 0;
@@ -59,11 +70,9 @@ struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
prev_nr_slots = folioq_nr_slots(tail);
}
- tail = kmalloc(sizeof(*tail), GFP_NOFS);
+ tail = netfs_folioq_alloc(rreq->debug_id, GFP_NOFS, trace);
if (!tail)
return ERR_PTR(-ENOMEM);
- netfs_stat(&netfs_n_folioq);
- folioq_init(tail);
tail->prev = prev;
if (prev)
/* [!] NOTE: After we set prev->next, the consumer is entirely
@@ -98,7 +107,7 @@ int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio
struct folio_queue *tail;
unsigned int slot, order = folio_order(folio);
- tail = netfs_buffer_make_space(rreq);
+ tail = netfs_buffer_make_space(rreq, netfs_trace_folioq_alloc_append_folio);
if (IS_ERR(tail))
return PTR_ERR(tail);
@@ -119,7 +128,7 @@ struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
if (next)
next->prev = NULL;
- netfs_folioq_free(head);
+ netfs_folioq_free(head, netfs_trace_folioq_delete);
wreq->buffer = next;
return next;
}
@@ -142,7 +151,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
folio_put(folio);
}
}
- netfs_folioq_free(p);
+ netfs_folioq_free(p, netfs_trace_folioq_clear);
}
}
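The misc.c hunks above are the heart of the change: netfs_folioq_alloc() and
netfs_folioq_free() now take the owning request's debug ID and a trace tag,
and netfs_buffer_make_space() reuses the allocator instead of open-coding the
kmalloc(). A minimal caller sketch of the updated exported API, assuming an
rreq is in scope and picking the trace tags purely for illustration:

	/* Illustrative only: allocate a folio_queue tagged with the request's
	 * debug ID, then free it again under a matching trace tag.
	 */
	struct folio_queue *fq;

	fq = netfs_folioq_alloc(rreq->debug_id, GFP_KERNEL,
				netfs_trace_folioq_alloc_read_prime);
	if (!fq)
		return -ENOMEM;

	/* ... attach folios to the queue and do the I/O ... */

	netfs_folioq_free(fq, netfs_trace_folioq_clear);

Both helpers emit a trace_netfs_folioq() line, so every folio_queue's
allocation and release appears in the trace tagged with both debug IDs.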
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index e8624f5c7fcc..f7a5cb29dd12 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -103,6 +103,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
subreq->transferred, subreq->len))
subreq->transferred = subreq->len;
+ trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
next_folio:
fsize = PAGE_SIZE << subreq->curr_folio_order;
fpos = round_down(subreq->start + subreq->consumed, fsize);
@@ -119,9 +120,11 @@ next_folio:
if (folioq) {
struct folio *folio = folioq_folio(folioq, slot);
- pr_err("folioq: orders=%02x%02x%02x%02x\n",
+ pr_err("folioq: fq=%x orders=%02x%02x%02x%02x %px\n",
+ folioq->debug_id,
folioq->orders[0], folioq->orders[1],
- folioq->orders[2], folioq->orders[3]);
+ folioq->orders[2], folioq->orders[3],
+ folioq);
if (folio)
pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
fpos, fend - 1, folio_pos(folio), folio_order(folio),
@@ -222,6 +225,7 @@ donation_changed:
slot = 0;
folioq = folioq->next;
subreq->curr_folioq = folioq;
+ trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
}
subreq->curr_folioq_slot = slot;
if (folioq && folioq_folio(folioq, slot))
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index ff0e82505a0b..87e5cf4a0957 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -161,7 +161,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
*/
if (iov_iter_is_folioq(wreq_iter) &&
wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
- netfs_buffer_make_space(wreq);
+ netfs_buffer_make_space(wreq, netfs_trace_folioq_prep_write);
}
subreq = netfs_alloc_subrequest(wreq);
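Every trace tag used in this patch is listed below; the companion header
change defining them sits outside fs/netfs and therefore outside the diffstat
above. A sketch of the implied enum, restricted to the tags visible in this
diff (the real header may define more, and may generate the list via macro
tables that also feed __print_symbolic()):

	/* Trace tags referenced by the callers in this patch (sketch only). */
	enum netfs_folioq_trace {
		netfs_trace_folioq_alloc_append_folio,
		netfs_trace_folioq_alloc_read_prep,
		netfs_trace_folioq_alloc_read_prime,
		netfs_trace_folioq_alloc_read_sing,
		netfs_trace_folioq_clear,
		netfs_trace_folioq_delete,
		netfs_trace_folioq_prep_write,
		netfs_trace_folioq_read_progress,
	};

Once the event is compiled in, it can be enabled like any other tracepoint
through tracefs (events/netfs/netfs_folioq/enable), and the per-request and
per-queue debug IDs make it possible to follow a folio_queue from allocation
to free.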