author     David Howells <dhowells@redhat.com>    2024-03-13 02:37:17 +0300
committer  Paolo Abeni <pabeni@redhat.com>        2024-03-14 15:09:52 +0300
commit     6b2536462fd48b49563aef0555517cb91047c5f5 (patch)
tree       c03f9186836b6a71cbb9c8c508953efbf1ee1f0c /net/rxrpc/txbuf.c
parent     ddbec99f58571301679addbc022256970ca3eac6 (diff)
download   linux-6b2536462fd48b49563aef0555517cb91047c5f5.tar.xz
rxrpc: Fix use of changed alignment param to page_frag_alloc_align()
Commit 411c5f36805c ("mm/page_alloc: modify page_frag_alloc_align() to accept align as an argument") changed the way page_frag_alloc_align() works, but it didn't fix AF_RXRPC, as AF_RXRPC's use of that allocator function hadn't been merged yet at the time.

Now, when the AFS filesystem is used, this results in:

	WARNING: CPU: 4 PID: 379 at include/linux/gfp.h:323 rxrpc_alloc_data_txbuf+0x9d/0x2b0 [rxrpc]

Fix this by using __page_frag_alloc_align() instead.

Note that it might be better to use an order-based alignment rather than a mask-based alignment.

Fixes: 49489bb03a50 ("rxrpc: Do zerocopy using MSG_SPLICE_PAGES and page frags")
Signed-off-by: David Howells <dhowells@redhat.com>
Reported-by: Marc Dionne <marc.dionne@auristor.com>
cc: Yunsheng Lin <linyunsheng@huawei.com>
cc: Alexander Duyck <alexander.duyck@gmail.com>
cc: Michael S. Tsirkin <mst@redhat.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: linux-afs@lists.infradead.org
cc: netdev@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
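[Editor's note] Why the warning fires: after commit 411c5f36805c, page_frag_alloc_align() expects a power-of-two alignment value and converts it to a mask internally, while __page_frag_alloc_align() takes the mask directly. A rough sketch of the two entry points, paraphrased from include/linux/gfp.h at the time (not an exact quote; placement and line numbers may differ):

	/* Underlying allocator: takes an alignment *mask* directly. */
	void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
				      gfp_t gfp_mask, unsigned int align_mask);

	/* Wrapper: takes a power-of-two *align* value and converts it to a mask. */
	static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
						  unsigned int fragsz, gfp_t gfp_mask,
						  unsigned int align)
	{
		/* This is the check reported at include/linux/gfp.h:323 above. */
		WARN_ON_ONCE(!is_power_of_2(align));
		return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
	}

rxrpc passes "~(data_align - 1) & ~(L1_CACHE_BYTES - 1)", which is already a mask rather than a power of two, so the wrapper's power-of-two check trips; calling __page_frag_alloc_align() directly passes the mask through unchanged.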
Diffstat (limited to 'net/rxrpc/txbuf.c')
-rw-r--r--   net/rxrpc/txbuf.c   4
1 file changed, 2 insertions, 2 deletions
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index b2a82ab756c2..e0679658d9de 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -33,8 +33,8 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
total = hoff + sizeof(*whdr) + data_size;
mutex_lock(&call->conn->tx_data_alloc_lock);
- buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
- ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
mutex_unlock(&call->conn->tx_data_alloc_lock);
if (!buf) {
kfree(txb);
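[Editor's note] The mask expression in the hunk above combines the caller's requested alignment with L1 cache-line alignment; ANDing the two masks means the stricter (larger) alignment wins. A minimal, standalone userspace sketch of that arithmetic, with data_align and the cache-line size assumed purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values, not taken from the kernel at runtime. */
		unsigned int data_align = 16;
		unsigned int l1_cache_bytes = 64;

		/* Same shape as the expression in rxrpc_alloc_data_txbuf(). */
		unsigned int mask = ~(data_align - 1) & ~(l1_cache_bytes - 1);

		/* Prints 0xffffffc0: offsets get rounded down to a 64-byte boundary. */
		printf("align_mask = %#x\n", mask);
		return 0;
	}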