author     Boris Brezillon <boris.brezillon@bootlin.com>   2018-10-19 10:20:09 +0300
committer  Boris Brezillon <boris.brezillon@bootlin.com>   2018-10-19 10:20:09 +0300
commit     042c1a5a6021f73c10dc84958c287eb2a2a26f7c (patch)
tree       8ff3f0812c1a88392233d8633e541088ecd78b9f /fs
parent     5cc1b66e63ecb0661c00b26a733406eef7386da7 (diff)
parent     53c83b59759c1ee213f5ffa194909daee8902a28 (diff)
download   linux-042c1a5a6021f73c10dc84958c287eb2a2a26f7c.tar.xz
Merge tag 'nand/for-4.20' of git://git.infradead.org/linux-mtd into mtd/next
NAND core changes:
- Two batches of cleanups of the NAND API, including:
  * Deprecating a lot of interfaces (now replaced by ->exec_op()).
  * Moving code in separate drivers (JEDEC, ONFI), in private files (internals), in platform drivers, etc.
  * Functions/structures reordering.
  * Exclusive use of the nand_chip structure instead of the MTD one all across the subsystem.
- Addition of the nand_wait_readrdy/rdy_op() helpers.

Raw NAND controller drivers changes:
- Various coccinelle patches.
- Marvell:
  * Use regmap_update_bits() for syscon access (see the sketch after this message).
  * More documentation.
  * BCH failure path rework.
  * More layouts to be supported.
  * IRQ handler complete() condition fixed.
- Fsl_ifc:
  * SRAM initialization fixed for newer controller versions.
- Denali:
  * Fix licenses mismatch and use a SPDX tag.
  * Set SPARE_AREA_SKIP_BYTES register to 8 if unset.
- Qualcomm:
  * Do not include dma-direct.h.
- Docg4:
  * Removed.
- Ams-delta:
  * Use of a GPIO lookup table.
  * Internal machinery changes.

Raw NAND chip drivers changes:
- Toshiba:
  * Add support for Toshiba memory BENAND.
  * Pass a single nand_chip object to the status helper.
- ESMT:
  * New driver to retrieve the ECC requirements from the 5th ID byte.
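The Marvell "Use regmap_update_bits() for syscon access" item above refers to the generic regmap read-modify-write helper. A minimal, hedged sketch of that pattern follows; the register offset, mask and phandle property are hypothetical and are not taken from the Marvell driver:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Hypothetical register layout, for illustration only. */
#define NAND_SYSCON_CTRL_REG	0x10
#define NAND_SYSCON_EN_MASK	0x1

static int nand_enable_via_syscon(struct device_node *np)
{
	struct regmap *sysctrl;

	/* Look up the system controller regmap referenced by a DT phandle. */
	sysctrl = syscon_regmap_lookup_by_phandle(np, "vendor,system-controller");
	if (IS_ERR(sysctrl))
		return PTR_ERR(sysctrl);

	/* Read-modify-write one field under the regmap lock instead of open-coded readl()/writel(). */
	return regmap_update_bits(sysctrl, NAND_SYSCON_CTRL_REG,
				  NAND_SYSCON_EN_MASK, NAND_SYSCON_EN_MASK);
}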
Diffstat (limited to 'fs')
-rw-r--r--   fs/buffer.c        1
-rw-r--r--   fs/isofs/inode.c   7
-rw-r--r--   fs/notify/mark.c   6
-rw-r--r--   fs/quota/quota.c  14
-rw-r--r--   fs/udf/super.c    93
5 files changed, 37 insertions(+), 84 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 4cc679d5bf58..6f1ae3ac9789 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -39,7 +39,6 @@
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ec3fba7d492f..488a9e7f8f66 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -24,6 +24,7 @@
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
+#include <linux/blkdev.h>
#include "isofs.h"
#include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
/*
* What if bugger tells us to go beyond page size?
*/
+ if (bdev_logical_block_size(s->s_bdev) > 2048) {
+ printk(KERN_WARNING
+ "ISOFS: unsupported/invalid hardware sector size %d\n",
+ bdev_logical_block_size(s->s_bdev));
+ goto out_freesbi;
+ }
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
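A note on the isofs hunk above: the ISO 9660 format is defined in terms of 2048-byte sectors, so a block device whose logical sector size is larger than that cannot back an isofs mount, and the new check rejects it before the block size is negotiated with sb_min_blocksize(). A minimal sketch of the same guard, factored into a hypothetical helper (the helper name is not part of the patch):

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Illustrative helper only; the actual patch open-codes this in isofs_fill_super(). */
static int isofs_check_hw_sector_size(struct super_block *s)
{
	int hw_size = bdev_logical_block_size(s->s_bdev);

	/* ISO 9660 logical blocks are at most 2048 bytes. */
	if (hw_size > 2048) {
		printk(KERN_WARNING
		       "ISOFS: unsupported/invalid hardware sector size %d\n",
		       hw_size);
		return -EINVAL;
	}
	return 0;
}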
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 05506d60131c..59cdb27826de 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
struct fsnotify_mark *mark;
assert_spin_locked(&conn->lock);
+ /* We can get detached connector here when inode is getting unlinked. */
+ if (!fsnotify_valid_obj_type(conn->type))
+ return;
hlist_for_each_entry(mark, &conn->list, obj_list) {
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
new_mask |= mark->mask;
}
- if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
- return;
-
*fsnotify_conn_mask_p(conn) = new_mask;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 860bfbe7a07a..f0cbf58ad4da 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,7 @@
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
+#include <linux/nospec.h>
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo uinfo;
int ret;
- /* This checks whether qc_state has enough entries... */
- BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
if (!sb->s_qcop->get_state)
return -ENOSYS;
ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
* GETXSTATE quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
* GETXSTATV quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
- if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+ if (type >= MAXQUOTAS)
return -EINVAL;
+ type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
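The quota.c change above follows the standard Spectre-v1 hardening pattern: bound the user-controlled index (here against MAXQUOTAS), then clamp it with array_index_nospec() so the CPU cannot use an out-of-range value while speculating past the bounds check. A minimal sketch of that check-then-clamp idiom with an illustrative table (not the fs/quota code):

#include <linux/errno.h>
#include <linux/nospec.h>

#define NR_ENTRIES 4	/* illustrative bound, standing in for MAXQUOTAS */

/* Hypothetical helper demonstrating the pattern used in do_quotactl(). */
static int lookup_entry(const int *table, unsigned int idx)
{
	if (idx >= NR_ENTRIES)
		return -EINVAL;
	/*
	 * The branch above can be speculated past; array_index_nospec()
	 * forces idx into [0, NR_ENTRIES) even under speculation.
	 */
	idx = array_index_nospec(idx, NR_ENTRIES);
	return table[idx];
}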
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3040dc2a32f6..6f515651a2c2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
- long lastblock;
uint16_t ident;
- struct udf_sb_info *sbi;
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
- }
-
- sbi = UDF_SB(sb);
- if (!bh) {
- /* Search backwards through the partitions */
- struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
- return 1;
-
- for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
- (newfileset.partitionReferenceNum != 0xFFFF &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- newfileset.partitionReferenceNum--) {
- lastblock = sbi->s_partmaps
- [newfileset.partitionReferenceNum]
- .s_partition_len;
- newfileset.logicalBlockNum = 0;
-
- do {
- bh = udf_read_ptagged(sb, &newfileset, 0,
- &ident);
- if (!bh) {
- newfileset.logicalBlockNum++;
- continue;
- }
-
- switch (ident) {
- case TAG_IDENT_SBD:
- {
- struct spaceBitmapDesc *sp;
- sp = (struct spaceBitmapDesc *)
- bh->b_data;
- newfileset.logicalBlockNum += 1 +
- ((le32_to_cpu(sp->numOfBytes) +
- sizeof(struct spaceBitmapDesc)
- - 1) >> sb->s_blocksize_bits);
- brelse(bh);
- break;
- }
- case TAG_IDENT_FSD:
- *fileset = newfileset;
- break;
- default:
- newfileset.logicalBlockNum++;
- brelse(bh);
- bh = NULL;
- break;
- }
- } while (newfileset.logicalBlockNum < lastblock &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- }
- }
-
- if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF) && bh) {
udf_debug("Fileset at block=%u, partition=%u\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
- sbi->s_partition = fileset->partitionReferenceNum;
+ UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
udf_load_fileset(sb, bh, root);
brelse(bh);
return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
*/
#define PART_DESC_ALLOC_STEP 32
+struct part_desc_seq_scan_data {
+ struct udf_vds_record rec;
+ u32 partnum;
+};
+
struct desc_seq_scan_data {
struct udf_vds_record vds[VDS_POS_LENGTH];
unsigned int size_part_descs;
- struct udf_vds_record *part_descs_loc;
+ unsigned int num_part_descs;
+ struct part_desc_seq_scan_data *part_descs_loc;
};
static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
{
struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
int partnum;
+ int i;
partnum = le16_to_cpu(desc->partitionNumber);
- if (partnum >= data->size_part_descs) {
- struct udf_vds_record *new_loc;
+ for (i = 0; i < data->num_part_descs; i++)
+ if (partnum == data->part_descs_loc[i].partnum)
+ return &(data->part_descs_loc[i].rec);
+ if (data->num_part_descs >= data->size_part_descs) {
+ struct part_desc_seq_scan_data *new_loc;
unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
data->part_descs_loc = new_loc;
data->size_part_descs = new_size;
}
- return &(data->part_descs_loc[partnum]);
+ return &(data->part_descs_loc[data->num_part_descs++].rec);
}
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
data.size_part_descs = PART_DESC_ALLOC_STEP;
+ data.num_part_descs = 0;
data.part_descs_loc = kcalloc(data.size_part_descs,
sizeof(*data.part_descs_loc),
GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
* are in it.
*/
for (; (!done && block <= lastblock); block++) {
-
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
}
/* Now handle prevailing Partition Descriptors */
- for (i = 0; i < data.size_part_descs; i++) {
- if (data.part_descs_loc[i].block) {
- ret = udf_load_partdesc(sb,
- data.part_descs_loc[i].block);
- if (ret < 0)
- return ret;
- }
+ for (i = 0; i < data.num_part_descs; i++) {
+ ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+ if (ret < 0)
+ return ret;
}
return 0;
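Taken together, the udf hunks above replace an array indexed directly by partition number (previously walked in full, with possibly sparse entries) with a compact list of (partition number, descriptor record) pairs: handle_partition_descriptor() first searches the list linearly and reuses the slot of a partition it has already seen, otherwise it appends a new entry, growing the allocation in PART_DESC_ALLOC_STEP increments, and the final loop then visits only the num_part_descs entries that were actually recorded. A minimal userspace sketch of this lookup-or-append pattern (structure and function names are illustrative, not the kernel ones):

#include <stdlib.h>

#define ALLOC_STEP 32	/* plays the role of PART_DESC_ALLOC_STEP */

struct pd_entry {		/* stand-in for part_desc_seq_scan_data */
	unsigned int partnum;
	unsigned int block;	/* stand-in for struct udf_vds_record */
};

struct pd_list {
	struct pd_entry *entries;
	unsigned int num;	/* entries actually recorded */
	unsigned int size;	/* allocated capacity */
};

/* Return the entry for partnum, appending a new one if it is not tracked yet. */
static struct pd_entry *pd_list_get(struct pd_list *l, unsigned int partnum)
{
	unsigned int i;

	for (i = 0; i < l->num; i++)
		if (l->entries[i].partnum == partnum)
			return &l->entries[i];	/* reuse the record for this partition */

	if (l->num >= l->size) {
		unsigned int new_size = l->size + ALLOC_STEP;
		struct pd_entry *n = realloc(l->entries, new_size * sizeof(*n));

		if (!n)
			return NULL;
		l->entries = n;
		l->size = new_size;
	}
	l->entries[l->num].partnum = partnum;
	l->entries[l->num].block = 0;
	return &l->entries[l->num++];
}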