path: root/fs/xfs/xfs_fsops.c
author     Carlos Maiolino <cem@kernel.org>  2025-03-04 13:25:46 +0300
committer  Carlos Maiolino <cem@kernel.org>  2025-03-04 13:25:46 +0300
commit     4c6283ec9284bb72906dba83bc7a809747e6331e (patch)
tree       6a2ed104fc86a90bb787ff0dbee020461e59ec14 /fs/xfs/xfs_fsops.c
parent     0a1fd78080c8c9a5582e82100bd91b87ae5ac57c (diff)
parent     9c477912b2f58da71751f244aceecf5f8cc549ed (diff)
Merge tag 'xfs-zoned-allocator-2025-03-03' of git://git.infradead.org/users/hch/xfs into xfs-6.15-zoned_devices
xfs: add support for zoned devices

Add support for the new zoned space allocator and thus for zoned devices:

    https://zonedstorage.io/docs/introduction/zoned-storage

to XFS. This has been developed for and tested on both SMR hard drives, which
are the oldest and most common class of zoned devices:

    https://zonedstorage.io/docs/introduction/smr

and ZNS SSDs:

    https://zonedstorage.io/docs/introduction/zns

It has not been tested with zoned UFS devices, as their current capacity
points and performance characteristics aren't too interesting for XFS use
cases (but never say never).

Sequential-write-only zones are supported for data only, using a new
allocator for the RT device that maps each zone to an rtgroup which is
written sequentially. All metadata and (for now) the log require randomly
writable space. This means a realtime device is required to support zoned
storage, but for the common case of SMR hard drives, which contain both
randomly writable zones and sequential-write-required zones on the same
block device, the concept of an internal RT device is added, so that using
XFS on an SMR HDD is as simple as:

    $ mkfs.xfs /dev/sda
    $ mount /dev/sda /mnt

When using NVMe ZNS SSDs that do not support conventional zones, the
traditional multi-device RT configuration is required. E.g. for an SSD with
a conventional namespace 1 and a zoned namespace 2:

    $ mkfs.xfs -r rtdev=/dev/nvme0n2 /dev/nvme0n1
    $ mount -o rtdev=/dev/nvme0n2 /dev/nvme0n1 /mnt

The zoned allocator can also be used on conventional block devices, or on
conventional zones (e.g. when using an SMR HDD as the external RT device).
For example, using zoned XFS on conventional SSDs shows very nice
performance advantages and write amplification reduction for intelligent
workloads like RocksDB.

Some work is still in progress or planned, but should not affect the
integration with the rest of XFS or the on-disk format:

 - support for quotas
 - support for reflinks

Note that the I/O path already supports reflink, but garbage collection
isn't refcount aware yet and would unshare shared blocks, thus rendering
the feature useless.
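For the external-RT-on-conventional-zones case mentioned above, the setup
mirrors the ZNS example. A hypothetical invocation (the device names
/dev/sda and /dev/sdb are illustrative assumptions, not taken from this
commit), with a conventional disk as the data device and an SMR HDD as the
external RT device:

    $ mkfs.xfs -r rtdev=/dev/sdb /dev/sda
    $ mount -o rtdev=/dev/sdb /dev/sda /mnt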
Diffstat (limited to 'fs/xfs/xfs_fsops.c')
-rw-r--r--  fs/xfs/xfs_fsops.c | 50
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 455298503d01..ee2cefbd5df8 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -24,6 +24,7 @@
 #include "xfs_rtalloc.h"
 #include "xfs_rtrmap_btree.h"
 #include "xfs_rtrefcount_btree.h"
+#include "xfs_metafile.h"
 
 /*
  * Write new AG headers to disk. Non-transactional, but need to be
@@ -307,6 +308,10 @@ xfs_growfs_data(
 	if (!mutex_trylock(&mp->m_growlock))
 		return -EWOULDBLOCK;
 
+	/* we can't grow the data section when an internal RT section exists */
+	if (in->newblocks != mp->m_sb.sb_dblocks && mp->m_sb.sb_rtstart)
+		return -EINVAL;
+
 	/* update imaxpct separately to the physical grow of the filesystem */
 	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
 		error = xfs_growfs_imaxpct(mp, in->imaxpct);
@@ -366,6 +371,7 @@ xfs_growfs_log(
 int
 xfs_reserve_blocks(
 	struct xfs_mount	*mp,
+	enum xfs_free_counter	ctr,
 	uint64_t		request)
 {
 	int64_t			lcounter, delta;
@@ -373,6 +379,8 @@ xfs_reserve_blocks(
 	int64_t			free;
 	int			error = 0;
 
+	ASSERT(ctr < XC_FREE_NR);
+
 	/*
 	 * With per-cpu counters, this becomes an interesting problem. we need
 	 * to work out if we are freeing or allocation blocks first, then we can
@@ -391,16 +399,16 @@ xfs_reserve_blocks(
 	 * counters directly since we shouldn't have any problems unreserving
 	 * space.
 	 */
-	if (mp->m_resblks > request) {
-		lcounter = mp->m_resblks_avail - request;
+	if (mp->m_free[ctr].res_total > request) {
+		lcounter = mp->m_free[ctr].res_avail - request;
 		if (lcounter > 0) { /* release unused blocks */
 			fdblks_delta = lcounter;
-			mp->m_resblks_avail -= lcounter;
+			mp->m_free[ctr].res_avail -= lcounter;
 		}
-		mp->m_resblks = request;
+		mp->m_free[ctr].res_total = request;
 		if (fdblks_delta) {
 			spin_unlock(&mp->m_sb_lock);
-			xfs_add_fdblocks(mp, fdblks_delta);
+			xfs_add_freecounter(mp, ctr, fdblks_delta);
 			spin_lock(&mp->m_sb_lock);
 		}
@@ -409,7 +417,7 @@ xfs_reserve_blocks(
 
 	/*
 	 * If the request is larger than the current reservation, reserve the
-	 * blocks before we update the reserve counters. Sample m_fdblocks and
+	 * blocks before we update the reserve counters. Sample m_free and
 	 * perform a partial reservation if the request exceeds free space.
 	 *
 	 * The code below estimates how many blocks it can request from
@@ -419,10 +427,10 @@ xfs_reserve_blocks(
 	 * space to fill it because mod_fdblocks will refill an undersized
 	 * reserve when it can.
 	 */
-	free = percpu_counter_sum(&mp->m_fdblocks) -
-			xfs_fdblocks_unavailable(mp);
-	delta = request - mp->m_resblks;
-	mp->m_resblks = request;
+	free = xfs_sum_freecounter_raw(mp, ctr) -
+			xfs_freecounter_unavailable(mp, ctr);
+	delta = request - mp->m_free[ctr].res_total;
+	mp->m_free[ctr].res_total = request;
 	if (delta > 0 && free > 0) {
 		/*
 		 * We'll either succeed in getting space from the free block
@@ -436,9 +444,9 @@ xfs_reserve_blocks(
 		 */
 		fdblks_delta = min(free, delta);
 		spin_unlock(&mp->m_sb_lock);
-		error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
+		error = xfs_dec_freecounter(mp, ctr, fdblks_delta, 0);
 		if (!error)
-			xfs_add_fdblocks(mp, fdblks_delta);
+			xfs_add_freecounter(mp, ctr, fdblks_delta);
 		spin_lock(&mp->m_sb_lock);
 	}
 out:
@@ -558,15 +566,13 @@ xfs_fs_reserve_ag_blocks(
 			return error;
 	}
 
-	if (xfs_has_realtime(mp)) {
-		err2 = xfs_rt_resv_init(mp);
-		if (err2 && err2 != -ENOSPC) {
-			xfs_warn(mp,
-	"Error %d reserving realtime metadata reserve pool.", err2);
-			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		}
+	err2 = xfs_metafile_resv_init(mp);
+	if (err2 && err2 != -ENOSPC) {
+		xfs_warn(mp,
+	"Error %d reserving realtime metadata reserve pool.", err2);
+		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 
-		if (err2 && !error)
+		if (!error)
 			error = err2;
 	}
@@ -582,9 +588,7 @@ xfs_fs_unreserve_ag_blocks(
 {
 	struct xfs_perag	*pag = NULL;
 
-	if (xfs_has_realtime(mp))
-		xfs_rt_resv_free(mp);
-
+	xfs_metafile_resv_free(mp);
 	while ((pag = xfs_perag_next(mp, pag)))
 		xfs_ag_resv_free(pag);
 }
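Beyond the mechanical renames, the substance of this diff is that
xfs_reserve_blocks() now operates on one of several free-space counters
(selected by enum xfs_free_counter) instead of the single
m_resblks/m_resblks_avail pair, so the zoned RT device can carry its own
reserve pool. The standalone C sketch below models that per-counter reserve
logic under simplifying assumptions: no per-cpu counters, no m_sb_lock, and
struct free_counter is illustrative rather than the kernel's actual layout;
the refill-via-the-add-path subtlety mentioned in the comments is collapsed
into a direct res_avail update.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Free-space counter classes, mirroring the enum in the diff. */
	enum xfs_free_counter {
		XC_FREE_BLOCKS,		/* data device free blocks */
		XC_FREE_RTEXTENTS,	/* RT device free extents */
		XC_FREE_NR,
	};

	/* Per-counter state: a free pool plus its own reserve pair. */
	struct free_counter {
		int64_t	free;		/* stand-in for the per-cpu counter */
		int64_t	res_total;	/* total reserved blocks */
		int64_t	res_avail;	/* reserved blocks still available */
	};

	static struct free_counter m_free[XC_FREE_NR];

	/* Resize one counter's reserve, shrinking or growing as needed. */
	static void reserve_blocks(enum xfs_free_counter ctr, int64_t request)
	{
		struct free_counter *fc = &m_free[ctr];

		if (fc->res_total > request) {
			/* Shrink: return unused reserved blocks to free space. */
			int64_t unused = fc->res_avail - request;

			if (unused > 0) {
				fc->res_avail -= unused;
				fc->free += unused;	/* xfs_add_freecounter() */
			}
			fc->res_total = request;
			return;
		}

		/* Grow: pull at most the free space actually available. */
		int64_t delta = request - fc->res_total;

		fc->res_total = request;
		if (delta > 0 && fc->free > 0) {
			int64_t take = delta < fc->free ? delta : fc->free;

			fc->free -= take;	/* xfs_dec_freecounter() */
			fc->res_avail += take;	/* kernel refills via the add path */
		}
	}

	int main(void)
	{
		m_free[XC_FREE_RTEXTENTS].free = 1000;

		reserve_blocks(XC_FREE_RTEXTENTS, 100);	/* grow the reserve */
		reserve_blocks(XC_FREE_RTEXTENTS, 40);	/* shrink it again */

		printf("total=%" PRId64 " avail=%" PRId64 " free=%" PRId64 "\n",
		       m_free[XC_FREE_RTEXTENTS].res_total,
		       m_free[XC_FREE_RTEXTENTS].res_avail,
		       m_free[XC_FREE_RTEXTENTS].free);
		return 0;
	}

Run as written, this prints "total=40 avail=40 free=960": the grow path
takes 100 blocks out of the free pool, and the shrink path returns the 60
unused reserved blocks, which is exactly the shape of the two branches in
the patched xfs_reserve_blocks().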