Diffstat (limited to 'fs/fat/fatent.c')
-rw-r--r-- | fs/fat/fatent.c | 102
1 file changed, 102 insertions, 0 deletions
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index bac10de678cc..25d43a5e8a4d 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/sched/signal.h>
 #include "fat.h"
 
 struct fatent_operations {
@@ -690,3 +691,104 @@ out:
 	unlock_fat(sbi);
 	return err;
 }
+
+static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
+{
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
+				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
+}
+
+int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
+{
+	struct super_block *sb = inode->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	const struct fatent_operations *ops = sbi->fatent_ops;
+	struct fat_entry fatent;
+	u64 ent_start, ent_end, minlen, trimmed = 0;
+	u32 free = 0;
+	unsigned long reada_blocks, reada_mask, cur_block = 0;
+	int err = 0;
+
+	/*
+	 * FAT data is organized as clusters, trim at the granularity of cluster.
+	 *
+	 * fstrim_range is in bytes, convert values to cluster index.
+	 * Treat sectors before the data region as all used, not to trim them.
+	 */
+	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
+	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
+	minlen = range->minlen >> sbi->cluster_bits;
+
+	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
+		return -EINVAL;
+	if (ent_end >= sbi->max_cluster)
+		ent_end = sbi->max_cluster - 1;
+
+	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
+	reada_mask = reada_blocks - 1;
+
+	fatent_init(&fatent);
+	lock_fat(sbi);
+	fatent_set_entry(&fatent, ent_start);
+	while (fatent.entry <= ent_end) {
+		/* readahead of fat blocks */
+		if ((cur_block & reada_mask) == 0) {
+			unsigned long rest = sbi->fat_length - cur_block;
+			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
+		}
+		cur_block++;
+
+		err = fat_ent_read_block(sb, &fatent);
+		if (err)
+			goto error;
+		do {
+			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
+				free++;
+			} else if (free) {
+				if (free >= minlen) {
+					u32 clus = fatent.entry - free;
+
+					err = fat_trim_clusters(sb, clus, free);
+					if (err && err != -EOPNOTSUPP)
+						goto error;
+					if (!err)
+						trimmed += free;
+					err = 0;
+				}
+				free = 0;
+			}
+		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);
+
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			goto error;
+		}
+
+		if (need_resched()) {
+			fatent_brelse(&fatent);
+			unlock_fat(sbi);
+			cond_resched();
+			lock_fat(sbi);
+		}
+	}
+	/* handle scenario when tail entries are all free */
+	if (free && free >= minlen) {
+		u32 clus = fatent.entry - free;
+
+		err = fat_trim_clusters(sb, clus, free);
+		if (err && err != -EOPNOTSUPP)
+			goto error;
+		if (!err)
+			trimmed += free;
+		err = 0;
+	}
+
+error:
+	fatent_brelse(&fatent);
+	unlock_fat(sbi);
+
+	range->len = trimmed << sbi->cluster_bits;
+
+	return err;
+}
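
For context on how this path gets exercised: fat_trim_fs() converts a byte-based fstrim_range into cluster indices and discards runs of free clusters, which makes it a natural backend for the FITRIM ioctl; the ioctl dispatch itself is not part of this hunk, so the wiring is assumed here. Below is a minimal userspace sketch of that interface. The mount point /mnt/fat and the whole-filesystem range are illustrative assumptions; FITRIM and struct fstrim_range are the standard UAPI from <linux/fs.h>.

/*
 * fitrim-fat.c - hypothetical caller, assuming FITRIM is dispatched to
 * fat_trim_fs() by the FAT ioctl handler (not shown in this diff).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	/* /mnt/fat is an assumed mount point, used only for illustration. */
	int fd = open("/mnt/fat", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Byte-based request, as fstrim(8) would issue; fat_trim_fs()
	 * converts start/len/minlen to cluster indices internally.
	 */
	struct fstrim_range range = {
		.start  = 0,
		.len    = UINT64_MAX,	/* trim the whole filesystem */
		.minlen = 0,		/* no minimum extent size */
	};

	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("ioctl(FITRIM)");
		close(fd);
		return 1;
	}

	/* The kernel writes the number of bytes it trimmed back into .len. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}

The reported byte count corresponds to the final range->len = trimmed << sbi->cluster_bits assignment in the patch. Note also that -EOPNOTSUPP from sb_issue_discard() is deliberately not treated as fatal there, so a backing device without discard support simply contributes nothing to the trimmed total.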