Diffstat (limited to 'drivers'):

 drivers/block/amiflop.c           |   1
 drivers/block/ataflop.c           |   1
 drivers/block/drbd/drbd_int.h     |   5
 drivers/block/floppy.c            |   1
 drivers/block/loop.c              |  35
 drivers/block/paride/pcd.c        |   1
 drivers/block/paride/pd.c         |   1
 drivers/block/paride/pf.c         |   1
 drivers/block/pktcdvd.c           |   1
 drivers/block/ps3disk.c           |   4
 drivers/block/swim.c              |   1
 drivers/block/swim3.c             |   1
 drivers/block/virtio_blk.c        |   3
 drivers/block/xsysace.c           |   1
 drivers/cdrom/gdrom.c             |   1
 drivers/ide/ide-cd.c              |   1
 drivers/ide/ide-cd_ioctl.c        |   5
 drivers/ide/ide-gd.c              |   6
 drivers/md/dm-exception-store.h   |  28
 drivers/md/dm-integrity.c         |   8
 drivers/md/md-bitmap.c            |   8
 drivers/md/md.c                   | 195
 drivers/md/md.h                   |  25
 drivers/md/raid5.c                |  25
 drivers/nvdimm/pfn_devs.c         |   4
 drivers/nvme/host/core.c          |   6
 drivers/nvme/host/pci.c           | 263
 drivers/nvme/target/core.c        |   9
 drivers/nvme/target/fc.c          |   7
 drivers/nvme/target/io-cmd-bdev.c |   6
 drivers/nvme/target/tcp.c         |  24
 drivers/scsi/sd.c                 |  33
 drivers/scsi/sr.c                 |   1
 drivers/xen/biomerge.c            |   5
 34 files changed, 333 insertions(+), 384 deletions(-)
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0903e0803ec8..92b930cb3b72 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1829,6 +1829,7 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);
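
Note: this one-line change repeats across the floppy, paride, cdrom, ide and
SCSI hunks below. After this series the block core only acts on media-change
events that a driver explicitly declares in disk->events, while *how* they
are delivered (background polling, uevents to userspace) is controlled
separately through disk->event_flags (see the ide-cd_ioctl.c, sd.c and sr.c
hunks). A minimal compilable sketch of the two-field split, with flag values
as used in this series (illustrative only, not a driver):

    #include <stdint.h>
    #include <stdio.h>

    /* events: which events the driver can report;
     * event_flags: how the block core delivers them. */
    #define DISK_EVENT_MEDIA_CHANGE (1u << 0)
    #define DISK_EVENT_FLAG_POLL    (1u << 0)
    #define DISK_EVENT_FLAG_UEVENT  (1u << 1)

    int main(void)
    {
            uint32_t events = DISK_EVENT_MEDIA_CHANGE;
            uint32_t event_flags = 0; /* e.g. ide: detectable, never uevent'd */

            printf("send uevent on media change: %s\n",
                   (events & DISK_EVENT_MEDIA_CHANGE) &&
                   (event_flags & DISK_EVENT_FLAG_UEVENT) ? "yes" : "no");
            return 0;
    }
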
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index b0dbbdfeb33e..c7b5c4671f05 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2028,6 +2028,7 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
+ unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 000a2f4c0e92..acd7af3630e9 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1317,10 +1317,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FIXED_BM \
((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
-#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
-#else
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
@@ -1333,7 +1329,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
-#endif
/* Estimate max bio size as 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 95f608d1a098..8072bd9881e6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4540,6 +4540,7 @@ static int __init do_floppy_init(void)
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
+ disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bf1c61cab8eb..102d79575895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -264,12 +264,20 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return ret;
}
+static inline void loop_iov_iter_bvec(struct iov_iter *i,
+ unsigned int direction, const struct bio_vec *bvec,
+ unsigned long nr_segs, size_t count)
+{
+ iov_iter_bvec(i, direction, bvec, nr_segs, count);
+ i->type |= ITER_BVEC_FLAG_NO_REF;
+}
+
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
+ loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +355,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
+ loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -388,7 +396,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
+ loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -555,7 +563,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
+ loop_iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
@@ -900,6 +908,24 @@ static int loop_prepare_queue(struct loop_device *lo)
return 0;
}
+static void loop_update_rotational(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *file_inode = file->f_mapping->host;
+ struct block_device *file_bdev = file_inode->i_sb->s_bdev;
+ struct request_queue *q = lo->lo_queue;
+ bool nonrot = true;
+
+ /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
+ if (file_bdev)
+ nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+
+ if (nonrot)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
@@ -963,6 +989,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 6d415b20fb70..001dbdcbf355 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -343,6 +343,7 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 0ff9b12d0e35..6f9ad3fc716f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -897,6 +897,7 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 35e6e271b219..1e9c50a7256c 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -319,6 +319,7 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5a71023f76c..024060165afa 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2761,7 +2761,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- disk->async_events = pd->bdev->bd_disk->async_events;
add_disk(disk);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4e1d9b31f60c..cc61c5ce3ad5 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,7 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
__func__, __LINE__, i, bio_sectors(iter.bio),
iter.bio->bi_iter.bi_sector);
@@ -496,7 +496,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
- "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
+ "%s is a %s (%llu MiB total, %llu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3fa6fcc34790..67b5ec281c6d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -862,6 +862,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1e2ae90d7715..cf42729c788e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1216,6 +1216,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
disk->private_data = fs;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2a7ca4a1e6f7..f1d90cd3dc47 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -693,7 +693,8 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+ vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 32a21b8d1d85..464c9092bc8b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1032,6 +1032,7 @@ static int ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
+ ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index f8b7345fe1cb..5cf3bade0d57 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -786,6 +786,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
+ gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1f03884a6808..3b15adc6ce98 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1797,6 +1797,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 4a6e1a413ead..46f2df288c6a 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -82,8 +82,9 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
/*
* ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so disk->events
- * is cleared to 0 and the following function is used only to trigger
+ * makes it impossible to use for proper event reporting, so
+ * DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags
+ * and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 04e008e8f6f9..f233b34ea0c0 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -299,8 +299,9 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
/*
* The following is used to force revalidation on the first open on
* removeable devices, and never gets reported to userland as
- * genhd->events is 0. This is intended as removeable ide disk
- * can't really detect MEDIA_CHANGE events.
+ * DISK_EVENT_FLAG_UEVENT isn't set in genhd->event_flags.
+ * This is intended as removable ide disk can't really detect
+ * MEDIA_CHANGE events.
*/
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -416,6 +417,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 12b5216c2cfe..721efc493942 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -135,9 +135,8 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
/*
* Funtions to manipulate consecutive chunks
*/
-# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
-# define DM_CHUNK_CONSECUTIVE_BITS 8
-# define DM_CHUNK_NUMBER_BITS 56
+#define DM_CHUNK_CONSECUTIVE_BITS 8
+#define DM_CHUNK_NUMBER_BITS 56
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
@@ -163,29 +162,6 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
}
-# else
-# define DM_CHUNK_CONSECUTIVE_BITS 0
-
-static inline chunk_t dm_chunk_number(chunk_t chunk)
-{
- return chunk;
-}
-
-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
-{
- return 0;
-}
-
-static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
-{
-}
-
-static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
-{
-}
-
-# endif
-
/*
* Return the number of sectors in the device.
*/
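
Note: with CONFIG_LBDAF gone, sector_t (and hence chunk_t) is always 64 bits,
so the packed encoding — an 8-bit consecutive-run count in the top bits and a
56-bit chunk number below it — becomes unconditional, and the no-op 32-bit
fallbacks above can be deleted. A standalone userspace sketch of the
encoding, mirroring the helpers that remain:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    #define DM_CHUNK_CONSECUTIVE_BITS 8
    #define DM_CHUNK_NUMBER_BITS 56

    static chunk_t dm_chunk_number(chunk_t chunk)
    {
            return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1);
    }

    static unsigned dm_consecutive_chunk_count(chunk_t chunk)
    {
            return chunk >> DM_CHUNK_NUMBER_BITS;
    }

    int main(void)
    {
            /* chunk 42 with a run of 3 consecutive chunks */
            chunk_t e = 42 | (3ULL << DM_CHUNK_NUMBER_BITS);

            printf("number=%llu consecutive=%u\n",
                   (unsigned long long)dm_chunk_number(e),
                   dm_consecutive_chunk_count(e));
            return 0;
    }
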
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7c678f50aaa3..05075613af93 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -88,14 +88,10 @@ struct journal_entry {
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
-#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
-#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
+#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
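
Note: the three journal_entry_set_sector() variants collapse to two: on
64-bit, a single WRITE_ONCE of the full little-endian value; on 32-bit, a
split store that publishes the low half before the high half, so a concurrent
reader never observes a torn sector. journal_entry_get_sector() can then
always read 64 bits. A userspace sketch of the 32-bit publication order,
approximating smp_wmb() plus WRITE_ONCE() with a release store:

    #include <stdatomic.h>
    #include <stdint.h>

    struct split_sector {
            _Atomic uint32_t lo;
            _Atomic uint32_t hi;
    };

    /* Store lo first, then hi with release ordering: a reader that sees
     * the new hi is guaranteed to also see the new lo. */
    static void set_sector(struct split_sector *s, uint64_t x)
    {
            atomic_store_explicit(&s->lo, (uint32_t)x, memory_order_relaxed);
            atomic_store_explicit(&s->hi, (uint32_t)(x >> 32),
                                  memory_order_release);
    }

    int main(void)
    {
            struct split_sector s = { 0, 0 };

            set_sector(&s, 0x1122334455667788ULL);
            return 0;
    }
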
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 1cd4f991792c..3a62a46b75c7 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -490,10 +490,10 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %d\n", le32_to_cpu(sb->version));
pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
- le32_to_cpu(*(__u32 *)(sb->uuid+0)),
- le32_to_cpu(*(__u32 *)(sb->uuid+4)),
- le32_to_cpu(*(__u32 *)(sb->uuid+8)),
- le32_to_cpu(*(__u32 *)(sb->uuid+12)));
+ le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+8)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+12)));
pr_debug(" events: %llu\n",
(unsigned long long) le64_to_cpu(sb->events));
pr_debug("events cleared: %llu\n",
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 05ffffb8b769..541015373f6a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -88,8 +88,7 @@ static struct kobj_type md_ktype;
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
-struct module *md_cluster_mod;
-EXPORT_SYMBOL(md_cluster_mod);
+static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
@@ -132,24 +131,6 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_info), gfp_flags);
-}
-static void flush_info_free(void *flush_info, void *data)
-{
- kfree(flush_info);
-}
-
-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_bio), gfp_flags);
-}
-static void flush_bio_free(void *flush_bio, void *data)
-{
- kfree(flush_bio);
-}
-
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
@@ -423,54 +404,31 @@ static int md_congested(void *data, int bits)
/*
* Generic flush handling for md
*/
-static void submit_flushes(struct work_struct *ws)
-{
- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
- struct mddev *mddev = fi->mddev;
- struct bio *bio = fi->bio;
-
- bio->bi_opf &= ~REQ_PREFLUSH;
- md_handle_request(mddev, bio);
-
- mempool_free(fi, mddev->flush_pool);
-}
-static void md_end_flush(struct bio *fbio)
+static void md_end_flush(struct bio *bio)
{
- struct flush_bio *fb = fbio->bi_private;
- struct md_rdev *rdev = fb->rdev;
- struct flush_info *fi = fb->fi;
- struct bio *bio = fi->bio;
- struct mddev *mddev = fi->mddev;
+ struct md_rdev *rdev = bio->bi_private;
+ struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
- /* an empty barrier - all done */
- bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
- }
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
}
-
- mempool_free(fb, mddev->flush_bio_pool);
- bio_put(fbio);
+ bio_put(bio);
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+static void md_submit_flush_data(struct work_struct *ws);
+
+static void submit_flushes(struct work_struct *ws)
{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
- struct flush_info *fi;
-
- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
-
- fi->bio = bio;
- fi->mddev = mddev;
- atomic_set(&fi->flush_pending, 1);
+ mddev->start_flush = ktime_get_boottime();
+ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
+ atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
@@ -480,37 +438,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
* we reclaim rcu_read_lock
*/
struct bio *bi;
- struct flush_bio *fb;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
-
- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
- fb->fi = fi;
- fb->rdev = rdev;
-
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
- bio_set_dev(bi, rdev->bdev);
bi->bi_end_io = md_end_flush;
- bi->bi_private = fb;
+ bi->bi_private = rdev;
+ bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- atomic_inc(&fi->flush_pending);
+ atomic_inc(&mddev->flush_pending);
submit_bio(bi);
-
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ queue_work(md_wq, &mddev->flush_work);
+}
+
+static void md_submit_flush_data(struct work_struct *ws)
+{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+ struct bio *bio = mddev->flush_bio;
+
+ /*
+ * must reset flush_bio before calling into md_handle_request to avoid a
+ * deadlock, because other bios passed md_handle_request suspend check
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
+ /* an empty barrier - all done */
+ bio_endio(bio);
+ } else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ md_handle_request(mddev, bio);
+ }
+}
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
+void md_flush_request(struct mddev *mddev, struct bio *bio)
+{
+ ktime_t start = ktime_get_boottime();
+ spin_lock_irq(&mddev->lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->flush_bio ||
+ ktime_after(mddev->last_flush, start),
+ mddev->lock);
+ if (!ktime_after(mddev->last_flush, start)) {
+ WARN_ON(mddev->flush_bio);
+ mddev->flush_bio = bio;
+ bio = NULL;
+ }
+ spin_unlock_irq(&mddev->lock);
+
+ if (!bio) {
+ INIT_WORK(&mddev->flush_work, submit_flushes);
+ queue_work(md_wq, &mddev->flush_work);
+ } else {
+ /* flush was performed for some other bio while we waited. */
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
+ else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ mddev->pers->make_request(mddev, bio);
}
}
}
@@ -560,6 +555,7 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
+ atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
@@ -1109,8 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
- sb->level >= 1)
+ if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (sector_t)(2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1408,8 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
- rdev->mddev->level >= 1)
+ if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1553,7 +1547,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
s32 offset;
sector_t bb_sector;
- u64 *bbp;
+ __le64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
@@ -1565,7 +1559,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, REQ_OP_READ, 0, true))
return -EIO;
- bbp = (u64 *)page_address(rdev->bb_page);
+ bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
@@ -1877,7 +1871,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
- u64 *bbp = (u64 *)page_address(rdev->bb_page);
+ __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
@@ -2855,8 +2849,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
- rdev->saved_raid_disk >= 0) {
+ if (!rdev->mddev->pers)
+ err = -EINVAL;
+ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -5511,22 +5507,6 @@ int md_run(struct mddev *mddev)
if (err)
return err;
}
- if (mddev->flush_pool == NULL) {
- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
- flush_info_free, mddev);
- if (!mddev->flush_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
- if (mddev->flush_bio_pool == NULL) {
- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
- flush_bio_free, mddev);
- if (!mddev->flush_bio_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -5686,11 +5666,8 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
-
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -5894,14 +5871,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->flush_bio_pool) {
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- }
- if (mddev->flush_pool) {
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
- }
}
void md_stop(struct mddev *mddev)
@@ -9257,7 +9226,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
* reshape is happening in the remote node, we need to
* update reshape_position and call start_reshape.
*/
- mddev->reshape_position = sb->reshape_position;
+ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
if (mddev->pers->update_reshape_pos)
mddev->pers->update_reshape_pos(mddev);
if (mddev->pers->start_reshape)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c52afb52c776..257cb4c9e22b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -252,19 +252,6 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_FLUSH_INFOS 8
-#define NR_FLUSH_BIOS 64
-struct flush_info {
- struct bio *bio;
- struct mddev *mddev;
- struct work_struct flush_work;
- atomic_t flush_pending;
-};
-struct flush_bio {
- struct flush_info *fi;
- struct md_rdev *rdev;
-};
-
struct mddev {
void *private;
struct md_personality *pers;
@@ -470,8 +457,16 @@ struct mddev {
* metadata and bitmap writes
*/
- mempool_t *flush_pool;
- mempool_t *flush_bio_pool;
+ /* Generic flush handling.
+ * The last to finish preflush schedules a worker to submit
+ * the rest of the request (without the REQ_PREFLUSH flag).
+ */
+ struct bio *flush_bio;
+ atomic_t flush_pending;
+ ktime_t start_flush, last_flush; /* last_flush is when the last completed
+ * flush was started.
+ */
+ struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
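
Note: the two flush mempools are replaced by a single in-flight flush per
mddev. md_flush_request() records when it arrived, and if a flush that
*started* after that point has already completed, the PREFLUSH part is
dropped, because that flush already covered every write that had finished by
then; submitters that cannot piggy-back wait on sb_wait for flush_bio to
clear. The core predicate, as a standalone sketch (no locking shown):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* start times in ns, as with ktime_get_boottime() */
    struct flush_state {
            int64_t last_flush;  /* start time of the last *completed* flush */
    };

    /* A request that arrived at 'start' needs no new flush if some flush
     * starting after 'start' has since completed. */
    static bool flush_already_covered(const struct flush_state *s,
                                      int64_t start)
    {
            return s->last_flush > start;   /* ktime_after() in the patch */
    }

    int main(void)
    {
            struct flush_state s = { .last_flush = 200 };

            printf("%d %d\n",
                   flush_already_covered(&s, 100),  /* 1: piggy-back */
                   flush_already_covered(&s, 300)); /* 0: must flush */
            return 0;
    }
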
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c033bfcb209e..2b0a715e70c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -711,6 +711,8 @@ static bool is_full_stripe_write(struct stripe_head *sh)
}
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __acquires(&sh1->stripe_lock)
+ __acquires(&sh2->stripe_lock)
{
if (sh1 > sh2) {
spin_lock_irq(&sh2->stripe_lock);
@@ -722,6 +724,8 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
}
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __releases(&sh1->stripe_lock)
+ __releases(&sh2->stripe_lock)
{
spin_unlock(&sh1->stripe_lock);
spin_unlock_irq(&sh2->stripe_lock);
@@ -4223,26 +4227,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_check_result:
sh->check_state = check_state_idle;
+ if (s->failed > 1)
+ break;
/* handle a successful check operation, if parity is correct
* we are done. Otherwise update the mismatch count and repair
* parity if !MD_RECOVERY_CHECK
*/
if (sh->ops.zero_sum_result == 0) {
- /* both parities are correct */
- if (!s->failed)
- set_bit(STRIPE_INSYNC, &sh->state);
- else {
- /* in contrast to the raid5 case we can validate
- * parity, but still have a failure to write
- * back
- */
- sh->check_state = check_state_compute_result;
- /* Returning at this point means that we may go
- * off and bring p and/or q uptodate again so
- * we make sure to check zero_sum_result again
- * to verify if p or q need writeback
- */
- }
+ /* Any parity checked was correct */
+ set_bit(STRIPE_INSYNC, &sh->state);
} else {
atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
@@ -6166,6 +6159,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
static int handle_active_stripes(struct r5conf *conf, int group,
struct r5worker *worker,
struct list_head *temp_inactive_list)
+ __releases(&conf->device_lock)
+ __acquires(&conf->device_lock)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0, hash;
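
Note: the __acquires/__releases markers added above are sparse context
annotations: they tell the checker that a function enters or leaves with the
named lock held, even though the lock/unlock calls look unbalanced within a
single function body. To the compiler they expand to nothing; under sparse
they are (approximately, per include/linux/compiler_types.h):

    #ifdef __CHECKER__
    #define __acquires(x) __attribute__((context(x, 0, 1)))
    #define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    #define __acquires(x)
    #define __releases(x)
    #endif
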
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index d271bd731af7..01f40672507f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -391,7 +391,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
- dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
@@ -410,7 +410,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
}
if (rc) {
dev_err(&nd_pfn->dev,
- "error clearing %x badblocks at %lx\n",
+ "error clearing %x badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
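
Note: with CONFIG_LBDAF gone, sector_t is unconditionally 64-bit, so the %lx
specifiers become wrong on 32-bit builds where long is 32 bits; %llx matches
the now-fixed width. The portable pattern, in userspace C for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t first_bad = 1ULL << 40;  /* e.g. a bad-block sector */

            /* Either cast to a type with a known specifier... */
            printf("at %llx\n", (unsigned long long)first_bad);
            /* ...or use the inttypes.h macro for the exact type. */
            printf("at %" PRIx64 "\n", first_bad);
            return 0;
    }
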
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2c43e12b70af..248ff3b48041 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1105,7 +1105,7 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
if (error) {
- dev_warn(ctrl->device, "Identify namespace failed\n");
+ dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(id);
return NULL;
}
@@ -1588,7 +1588,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
unsigned short bs = 1 << ns->lba_shift;
blk_mq_freeze_queue(disk->queue);
@@ -2549,7 +2549,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->crdt[2] = le16_to_cpu(id->crdt3);
ctrl->oacs = le16_to_cpu(id->oacs);
- ctrl->oncs = le16_to_cpup(&id->oncs);
+ ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a90cf5d63aac..c1eecde6b853 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -177,7 +177,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
* commands and one for I/O commands).
*/
struct nvme_queue {
- struct device *q_dmadev;
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
@@ -189,7 +188,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- s16 cq_vector;
+ u16 cq_vector;
u16 sq_tail;
u16 last_sq_tail;
u16 cq_head;
@@ -200,6 +199,7 @@ struct nvme_queue {
#define NVMEQ_ENABLED 0
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
+#define NVMEQ_POLLED 3
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
@@ -208,10 +208,10 @@ struct nvme_queue {
};
/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_init_iod to ensure there's enough space
- * allocated to store the PRP list.
+ * The nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP/SGL chunk allocations in addition
+ * to the actual struct scatterlist.
*/
struct nvme_iod {
struct nvme_request req;
@@ -220,11 +220,10 @@ struct nvme_iod {
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
dma_addr_t first_dma;
- struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t meta_dma;
struct scatterlist *sg;
- struct scatterlist inline_sg[0];
};
/*
@@ -372,12 +371,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
}
/*
- * Max size of iod being embedded in the request payload
- */
-#define NVME_INT_PAGES 2
-#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->ctrl.page_size)
-
-/*
* Will slightly overestimate the number of pages needed. This is OK
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
@@ -411,15 +404,6 @@ static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
return alloc_size + sizeof(struct scatterlist) * nseg;
}
-static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
-{
- unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
- NVME_INT_BYTES(dev), NVME_INT_PAGES,
- use_sgl);
-
- return sizeof(struct nvme_iod) + alloc_size;
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -584,37 +568,26 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
- int nseg = blk_rq_nr_phys_segments(rq);
- unsigned int size = blk_rq_payload_bytes(rq);
-
- iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ enum dma_data_direction dma_dir = rq_data_dir(req) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ int i;
- if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- } else {
- iod->sg = iod->inline_sg;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, dma_addr, iod->dma_len, dma_dir);
+ return;
}
- iod->aborted = 0;
- iod->npages = -1;
- iod->nents = 0;
- iod->length = size;
+ WARN_ON_ONCE(!iod->nents);
- return BLK_STS_OK;
-}
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
-
- int i;
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
@@ -638,8 +611,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
dma_addr = next_dma_addr;
}
- if (iod->sg != iod->inline_sg)
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sg, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -829,80 +801,103 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
+static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return 0;
+}
+
+static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
+ cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
+ cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct request_queue *q = req->q;
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
- blk_status_t ret = BLK_STS_IOERR;
+ blk_status_t ret = BLK_STS_RESOURCE;
int nr_mapped;
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (!is_pci_p2pdma_page(bv.bv_page)) {
+ if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ return nvme_setup_prp_simple(dev, req,
+ &cmnd->rw, &bv);
+
+ if (iod->nvmeq->qid &&
+ dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ return nvme_setup_sgl_simple(dev, req,
+ &cmnd->rw, &bv);
+ }
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(q, req, iod->sg);
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto out;
- ret = BLK_STS_RESOURCE;
-
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
- dma_dir);
+ rq_dma_dir(req));
else
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- dma_dir, DMA_ATTR_NO_WARN);
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
+ iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-
- if (ret != BLK_STS_OK)
- goto out_unmap;
-
- ret = BLK_STS_IOERR;
- if (blk_integrity_rq(req)) {
- if (blk_rq_count_integrity_sg(q, req->bio) != 1)
- goto out_unmap;
-
- sg_init_table(&iod->meta_sg, 1);
- if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
- goto out_unmap;
-
- if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
- goto out_unmap;
-
- cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
- }
-
- return BLK_STS_OK;
-
-out_unmap:
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
+ if (ret != BLK_STS_OK)
+ nvme_unmap_data(dev, req);
return ret;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- if (iod->nents) {
- /* P2PDMA requests do not need to be unmapped */
- if (!is_pci_p2pdma_page(sg_page(iod->sg)))
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
- if (blk_integrity_rq(req))
- dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
- }
-
- nvme_cleanup_cmd(req);
- nvme_free_iod(dev, req);
+ iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+ rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->meta_dma))
+ return BLK_STS_IOERR;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ return 0;
}
/*
@@ -915,9 +910,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_queue *nvmeq = hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_command cmnd;
blk_status_t ret;
+ iod->aborted = 0;
+ iod->npages = -1;
+ iod->nents = 0;
+
/*
* We should not need to do this, but we're still using this to
* ensure we can drain requests on a dying queue.
@@ -929,21 +929,23 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- ret = nvme_init_iod(req, dev);
- if (ret)
- goto out_free_cmd;
-
if (blk_rq_nr_phys_segments(req)) {
ret = nvme_map_data(dev, req, &cmnd);
if (ret)
- goto out_cleanup_iod;
+ goto out_free_cmd;
+ }
+
+ if (blk_integrity_rq(req)) {
+ ret = nvme_map_metadata(dev, req, &cmnd);
+ if (ret)
+ goto out_unmap_data;
}
blk_mq_start_request(req);
nvme_submit_cmd(nvmeq, &cmnd, bd->last);
return BLK_STS_OK;
-out_cleanup_iod:
- nvme_free_iod(dev, req);
+out_unmap_data:
+ nvme_unmap_data(dev, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -952,8 +954,14 @@ out_free_cmd:
static void nvme_pci_complete_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_unmap_data(iod->nvmeq->dev, req);
+ nvme_cleanup_cmd(req);
+ if (blk_integrity_rq(req))
+ dma_unmap_page(dev->dev, iod->meta_dma,
+ rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
nvme_complete_rq(req);
}
@@ -1088,7 +1096,7 @@ static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
* using the CQ lock. For normal interrupt driven threads we have
* to disable the interrupt to avoid racing with it.
*/
- if (nvmeq->cq_vector == -1) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, tag);
spin_unlock(&nvmeq->cq_poll_lock);
@@ -1148,7 +1156,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG;
- if (vector != -1)
+ if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
flags |= NVME_CQ_IRQ_ENABLED;
/*
@@ -1161,10 +1169,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
- if (vector != -1)
- c.create_cq.irq_vector = cpu_to_le16(vector);
- else
- c.create_cq.irq_vector = 0;
+ c.create_cq.irq_vector = cpu_to_le16(vector);
return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
@@ -1371,16 +1376,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
if (!nvmeq->sq_cmds)
return;
if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
- pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
} else {
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}
}
@@ -1410,10 +1415,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
- if (nvmeq->cq_vector == -1)
- return 0;
- pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
- nvmeq->cq_vector = -1;
+ if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
}
@@ -1498,7 +1501,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
goto free_cqdma;
- nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
spin_lock_init(&nvmeq->sq_lock);
spin_lock_init(&nvmeq->cq_poll_lock);
@@ -1507,7 +1509,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->qid = qid;
- nvmeq->cq_vector = -1;
dev->ctrl.queue_count++;
return 0;
@@ -1552,7 +1553,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
int result;
- s16 vector;
+ u16 vector = 0;
clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
@@ -1563,7 +1564,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
if (!polled)
vector = dev->num_vecs == 1 ? 0 : qid;
else
- vector = -1;
+ set_bit(NVMEQ_POLLED, &nvmeq->flags);
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result)
@@ -1578,7 +1579,8 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
- if (vector != -1) {
+ if (!polled) {
+ nvmeq->cq_vector = vector;
result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1588,7 +1590,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
return result;
release_sq:
- nvmeq->cq_vector = -1;
dev->online_queues--;
adapter_delete_sq(dev, qid);
release_cq:
@@ -1639,7 +1640,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+ dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1730,7 +1731,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
nvme_init_queue(nvmeq, 0);
result = queue_request_irq(nvmeq);
if (result) {
- nvmeq->cq_vector = -1;
+ dev->online_queues--;
return result;
}
@@ -2171,10 +2172,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* number of interrupts.
*/
result = queue_request_irq(adminq);
- if (result) {
- adminq->cq_vector = -1;
+ if (result)
return result;
- }
set_bit(NVMEQ_ENABLED, &adminq->flags);
result = nvme_create_io_queues(dev);
@@ -2286,11 +2285,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
- if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
- dev->tagset.cmd_size = max(dev->tagset.cmd_size,
- nvme_pci_cmd_size(dev, true));
- }
+ dev->tagset.cmd_size = sizeof(struct nvme_iod);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b3e765a95af8..4d8dd29479c0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -214,6 +214,8 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&subsys->lock);
+
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
@@ -494,13 +496,14 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret;
mutex_lock(&subsys->lock);
- ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = 0;
if (ns->enabled)
goto out_unlock;
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 98b7b1f4ee96..9369a11fe7a9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -128,12 +128,12 @@ struct nvmet_fc_tgt_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
- struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
struct list_head pending_cmd_list;
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
@@ -588,9 +588,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (qid > NVMET_NR_QUEUES)
return NULL;
- queue = kzalloc((sizeof(*queue) +
- (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
- GFP_KERNEL);
+ queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
if (!queue)
return NULL;
@@ -603,7 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue->work_q)
goto out_a_put;
- queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index a065dbfc43b1..3efc52f9c309 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -196,7 +196,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
- return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ return errno_to_nvme_status(req, ret);
}
return NVME_SC_SUCCESS;
}
@@ -252,7 +252,6 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
struct bio *bio = NULL;
- u16 status = NVME_SC_SUCCESS;
sector_t sector;
sector_t nr_sector;
int ret;
@@ -264,13 +263,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0);
- status = blk_to_nvme_status(req, errno_to_blk_status(ret));
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
submit_bio(bio);
} else {
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
}
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ad0df786fe93..0a941abf56ec 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -371,7 +371,8 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DATA_PDU;
pdu->hdr.type = nvme_tcp_c2h_data;
- pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+ NVME_TCP_F_DATA_SUCCESS : 0);
pdu->hdr.hlen = sizeof(*pdu);
pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
pdu->hdr.plen =
@@ -542,8 +543,19 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DDGST;
cmd->offset = 0;
} else {
- nvmet_setup_response_pdu(cmd);
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
}
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ }
+
return 1;
}
@@ -619,7 +631,13 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
return ret;
cmd->offset += ret;
- nvmet_setup_response_pdu(cmd);
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
return 1;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2b2bc4b49d78..ebc80354714c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2256,22 +2256,6 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
-/*
- * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
- * and the reported logical block size is bigger than 512 bytes. Note
- * that last_sector is a u64 and therefore logical_to_sectors() is not
- * applicable.
- */
-static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
-{
- u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
-
- if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
- return false;
-
- return true;
-}
-
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2337,14 +2321,6 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
/* Logical blocks per physical block exponent */
sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
@@ -2426,14 +2402,6 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
sdkp->capacity = lba + 1;
sdkp->physical_block_size = sector_size;
return sector_size;
@@ -3325,6 +3293,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
}
blk_pm_runtime_init(sdp->request_queue, dev);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 039c27c2d7b3..c3f443d5aea8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -716,6 +716,7 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index f3fbb700f569..05a286d24f14 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -4,12 +4,13 @@
#include <xen/xen.h>
#include <xen/page.h>
+/* check if @page can be merged with 'vec1' */
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2)
+ const struct page *page)
{
#if XEN_PAGE_SIZE == PAGE_SIZE
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else