author | Al Viro <viro@zeniv.linux.org.uk> | 2014-12-09 04:39:29 +0300
---|---|---
committer | Al Viro <viro@zeniv.linux.org.uk> | 2014-12-09 04:39:29 +0300
commit | ba00410b8131b23edfb0e09f8b6dd26c8eb621fb | (patch)
tree | c08504e4d2fa51ac91cef544f336d0169806c49f | /drivers/block
parent | 8ce74dd6057832618957fc2cbd38fa959c3a0a6c | (diff)
parent | aa583096d9767892983332e7c1a984bd17e3cd39 | (diff)
download | linux-ba00410b8131b23edfb0e09f8b6dd26c8eb621fb.tar.xz |
Merge branch 'iov_iter' into for-next
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/null_blk.c | 14
-rw-r--r-- | drivers/block/rbd.c | 35
-rw-r--r-- | drivers/block/sunvdc.c | 9
-rw-r--r-- | drivers/block/zram/zram_drv.c | 13
4 files changed, 34 insertions, 37 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 2671a3f02f0c..8001e812018b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
 
 		ret = setup_commands(nq);
 		if (ret)
-			goto err_queue;
+			return ret;
 		nullb->nr_queues++;
 	}
 
-	return 0;
-err_queue:
-	cleanup_queues(nullb);
-	return ret;
 }
 
 static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
 			goto out_cleanup_queues;
 		}
 		blk_queue_make_request(nullb->q, null_queue_bio);
-		init_driver_queues(nullb);
+		rv = init_driver_queues(nullb);
+		if (rv)
+			goto out_cleanup_blk_queue;
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
 		}
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
-		init_driver_queues(nullb);
+		rv = init_driver_queues(nullb);
+		if (rv)
+			goto out_cleanup_blk_queue;
 	}
 
 	nullb->q->queuedata = nullb;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0a54c588e433..27b71a0b72d0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -342,7 +342,6 @@ struct rbd_device {
 
 	struct list_head	rq_queue;	/* incoming rq queue */
 	spinlock_t		lock;		/* queue, flags, open_count */
-	struct workqueue_struct	*rq_wq;
 	struct work_struct	rq_work;
 
 	struct rbd_image_header	header;
@@ -402,6 +401,8 @@ static struct kmem_cache	*rbd_segment_name_cache;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
 	}
 
 	if (queued)
-		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+		queue_work(rbd_wq, &rbd_dev->rq_work);
 }
 
 /*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	page_count = (u32) calc_pages_for(offset, length);
 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
 	if (IS_ERR(pages))
-		ret = PTR_ERR(pages);
+		return PTR_ERR(pages);
 
 	ret = -ENOMEM;
 	obj_request = rbd_obj_request_create(object_name, offset, length,
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
 
-	rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-					 rbd_dev->disk->disk_name);
-	if (!rbd_dev->rq_wq) {
-		ret = -ENOMEM;
-		goto err_out_mapping;
-	}
-
 	ret = rbd_bus_add_dev(rbd_dev);
 	if (ret)
-		goto err_out_workqueue;
+		goto err_out_mapping;
 
 	/* Everything's ready.  Announce the disk to the world. */
 
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
 	return ret;
 
-err_out_workqueue:
-	destroy_workqueue(rbd_dev->rq_wq);
-	rbd_dev->rq_wq = NULL;
 err_out_mapping:
 	rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-	destroy_workqueue(rbd_dev->rq_wq);
 	rbd_free_disk(rbd_dev);
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
 	if (rc)
 		return rc;
 
+	/*
+	 * The number of active work items is limited by the number of
+	 * rbd devices, so leave @max_active at default.
+	 */
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	if (!rbd_wq) {
+		rc = -ENOMEM;
+		goto err_out_slab;
+	}
+
 	if (single_major) {
 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
 		if (rbd_major < 0) {
 			rc = rbd_major;
-			goto err_out_slab;
+			goto err_out_wq;
 		}
 	}
 
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
 err_out_blkdev:
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+	destroy_workqueue(rbd_wq);
 err_out_slab:
 	rbd_slab_exit();
 	return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
 	rbd_sysfs_cleanup();
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+	destroy_workqueue(rbd_wq);
 	rbd_slab_exit();
 }
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 756b8ec00f16..0ebadf93b6c5 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -69,8 +69,6 @@ struct vdc_port {
 	u8			vdisk_mtype;
 
 	char			disk_name[32];
-
-	struct vio_disk_vtoc	label;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port)
 	if (comp.err)
 		return comp.err;
 
-	err = generic_request(port, VD_OP_GET_VTOC,
-			      &port->label, sizeof(port->label));
-	if (err < 0) {
-		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
-		return err;
-	}
-
 	if (vdc_version_supported(port, 1, 1)) {
 		/* vdisk_size should be set during the handshake, if it wasn't
 		 * then the underlying disk is reserved by another system
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8aa8279..3920ee45aa59 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
 	u64 val = 0;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		val = zs_get_total_pages(meta->mem_pool);
+	}
 	up_read(&zram->init_lock);
 
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
 	int err;
 	unsigned long val;
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta = zram->meta;
 
 	err = kstrtoul(buf, 10, &val);
 	if (err || val != 0)
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
-	if (init_done(zram))
+	if (init_done(zram)) {
+		struct zram_meta *meta = zram->meta;
 		atomic_long_set(&zram->stats.max_used_pages,
 				zs_get_total_pages(meta->mem_pool));
+	}
 	up_read(&zram->init_lock);
 
 	return len;
@@ -558,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem);
+		if (user_mem)
+			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
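For readers unfamiliar with the workqueue API that the rbd hunks switch to, the following is a minimal, self-contained sketch (not part of the commit; the demo_* names and the "demo" workqueue name are hypothetical) of the same pattern: one module-level workqueue allocated with WQ_MEM_RECLAIM and the default max_active in module init, fed with queue_work(), and torn down in module exit.

/*
 * Hypothetical sketch of the module-scoped workqueue pattern adopted by rbd.
 * Not from the commit; demo_* identifiers are illustrative only.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: work item ran\n");
}

static int __init demo_init(void)
{
	/* Leave @max_active at 0 (the default), as the rbd comment suggests. */
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);	/* analogous to rbd_request_fn() */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains pending work before freeing */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");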