author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-04 17:58:06 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-04 17:58:06 +0300
commit    | f459c34538f57661e0fd1d3eaf7c0b17125ae011 (patch)
tree      | 3addc82d7f792c4533501978798dad0095293933 /drivers/block
parent    | 29dcea88779c856c7dc92040a0c01233263101d4 (diff)
parent    | 32a50fabb334b2f0725de84bf248bd8c24c22b05 (diff)
download  | linux-f459c34538f57661e0fd1d3eaf7c0b17125ae011.tar.xz
Merge tag 'for-4.18/block-20180603' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
- clean up how we pass around gfp_t and blk_mq_req_flags_t (Christoph);
  the new blk_get_request() calling convention is sketched after this list
- prepare us to defer scheduler attach (Christoph)
- clean up drivers handling of bounce buffers (Christoph)
- fix timeout handling corner cases (Christoph/Bart/Keith); the new
  timeout return convention is sketched after this list
- bcache fixes (Coly)
- prep work for bcachefs and some block layer optimizations (Kent).
- convert users of bio_sets to using embedded structs (Kent); the
  conversion pattern is sketched after this list.
- fixes for the BFQ io scheduler (Paolo/Davide/Filippo)
- lightnvm fixes and improvements (Matias, with contributions from Hans
and Javier)
- adding discard throttling to blk-wbt (me)
- sbitmap blk-mq-tag handling (me/Omar/Ming).
- remove the sparc jsflash block driver, acked by DaveM.
- Kyber scheduler improvement from Jianchao, making it more friendly
wrt merging.
- conversion of symbolic proc permissions to octal, from Joe Perches.
Previously the block parts were a mix of both.
- nbd fixes (Josef and Kevin Vigor)
- unify how we handle the various kinds of timestamps that the block
core and utility code uses (Omar)
- three NVMe pull requests from Keith and Christoph, bringing AEN to
feature completeness, file backed namespaces, cq/sq lock split, and
various fixes
- various little fixes and improvements all over the map
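
The gfp_t/blk_mq_req_flags_t cleanup is what turns calls such as blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM) in the paride, sx8, pktcdvd and virtio_blk hunks below into blk_get_request(q, REQ_OP_DRV_IN, 0): the last argument is now a blk_mq_req_flags_t rather than a gfp_t, and 0 keeps the old blocking behaviour. A minimal sketch of issuing a driver-private request under the new signature; my_send_drv_command() and the bare queue argument are invented for illustration:

#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical helper: allocate and execute an empty driver-private
 * request.  Passing 0 as the blk_mq_req_flags_t argument means the
 * allocation may sleep, which is what GFP_KERNEL/__GFP_RECLAIM used
 * to express. */
static int my_send_drv_command(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_execute_rq(q, NULL, rq, 0);	/* waits for completion */
	/* a real driver would inspect its per-request pdu here */

	blk_put_request(rq);
	return 0;
}

Callers that must not block pass BLK_MQ_REQ_NOWAIT instead of 0.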
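
The timeout corner-case work also changes the driver-visible contract, as the mtip32xx, nbd and null_blk hunks below show: BLK_EH_HANDLED no longer exists, so a driver that decides a timed-out request is finished completes it itself and returns BLK_EH_DONE. A rough sketch of the new shape of a blk-mq .timeout callback; my_timeout_rq(), struct my_pdu and the device_dead test are invented:

#include <linux/blk-mq.h>

struct my_pdu {				/* hypothetical per-request driver data */
	blk_status_t status;
	bool device_dead;
};

static enum blk_eh_timer_return my_timeout_rq(struct request *rq, bool reserved)
{
	struct my_pdu *pdu = blk_mq_rq_to_pdu(rq);

	if (!pdu->device_dead)
		return BLK_EH_RESET_TIMER;	/* re-arm and keep waiting */

	/* Previously: set the error and return BLK_EH_HANDLED so the core
	 * completed the request.  Now the driver completes it explicitly. */
	pdu->status = BLK_STS_TIMEOUT;
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

This matches the conversions of mtip_cmd_timeout(), nbd_xmit_timeout() and the null_blk timeout handlers in the diff below.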
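
The embedded bio_set/mempool conversion is the biggest mechanical change in the drivers/block diff below (drbd and pktcdvd in particular): drivers embed the objects instead of holding pointers, and switch from the *_create()/*_destroy() allocators to the *_init()/*_exit() pair. A sketch of the pattern with invented names (struct my_cmd, my_bio_set, my_cmd_pool, my_cmd_cache):

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_cmd { int tag; };		/* hypothetical command structure */

static struct bio_set my_bio_set;	/* was: struct bio_set *my_bio_set */
static mempool_t my_cmd_pool;		/* was: mempool_t *my_cmd_pool */
static struct kmem_cache *my_cmd_cache;

static int my_create_pools(void)
{
	int ret;

	my_cmd_cache = kmem_cache_create("my_cmd", sizeof(struct my_cmd),
					 0, 0, NULL);
	if (!my_cmd_cache)
		return -ENOMEM;

	/* bioset_create()/mempool_create_slab_pool() returned a pointer;
	 * the _init variants fill in the embedded object and return 0 or
	 * a negative errno. */
	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto out_cache;
	ret = mempool_init_slab_pool(&my_cmd_pool, 16, my_cmd_cache);
	if (ret)
		goto out_bioset;
	return 0;

out_bioset:
	bioset_exit(&my_bio_set);
out_cache:
	kmem_cache_destroy(my_cmd_cache);
	return ret;
}

static void my_destroy_pools(void)
{
	mempool_exit(&my_cmd_pool);
	bioset_exit(&my_bio_set);
	kmem_cache_destroy(my_cmd_cache);
}

Allocation sites then pass addresses, e.g. bio_alloc_bioset(GFP_NOIO, 1, &my_bio_set) and mempool_alloc(&my_cmd_pool, GFP_NOIO), and the exit helpers tolerate objects that were never initialized, which is why the old NULL checks in drbd_destroy_mempools() simply disappear.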
* tag 'for-4.18/block-20180603' of git://git.kernel.dk/linux-block: (196 commits)
blk-mq: update nr_requests when switching to 'none' scheduler
block: don't use blocking queue entered for recursive bio submits
dm-crypt: fix warning in shutdown path
lightnvm: pblk: take bitmap alloc. out of critical section
lightnvm: pblk: kick writer on new flush points
lightnvm: pblk: only try to recover lines with written smeta
lightnvm: pblk: remove unnecessary bio_get/put
lightnvm: pblk: add possibility to set write buffer size manually
lightnvm: fix partial read error path
lightnvm: proper error handling for pblk_bio_add_pages
lightnvm: pblk: fix smeta write error path
lightnvm: pblk: garbage collect lines with failed writes
lightnvm: pblk: rework write error recovery path
lightnvm: pblk: remove dead function
lightnvm: pass flag on graceful teardown to targets
lightnvm: pblk: check for chunk size before allocating it
lightnvm: pblk: remove unnecessary argument
lightnvm: pblk: remove unnecessary indirection
lightnvm: pblk: return NVM_ error on failed submission
lightnvm: pblk: warn in case of corrupted write buffer
...
Diffstat (limited to 'drivers/block')
28 files changed, 226 insertions, 229 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index f781eff7d23e..7c3887a7e534 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1179,7 +1179,6 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) return DAC960_Failure(Controller, "DMA mask out of range"); - Controller->BounceBufferLimit = DMA_BIT_MASK(32); if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) { CommandMailboxesSize = 0; @@ -1380,11 +1379,8 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T dma_addr_t CommandMailboxDMA; DAC960_V2_CommandStatus_T CommandStatus; - if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64))) - Controller->BounceBufferLimit = DMA_BIT_MASK(64); - else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) - Controller->BounceBufferLimit = DMA_BIT_MASK(32); - else + if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)) && + pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) return DAC960_Failure(Controller, "DMA mask out of range"); /* This is a temporary dma mapping, used only in the scope of this function */ @@ -2540,7 +2536,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) continue; } Controller->RequestQueue[n] = RequestQueue; - blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit); RequestQueue->queuedata = Controller; blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit); blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand); @@ -6594,7 +6589,7 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) DAC960_ProcDirectoryEntry); proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); - proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); + proc_create_data("user_command", 0600, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); Controller->ControllerProcEntry = ControllerProcEntry; } diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h index 21aff470d268..1439e651928b 100644 --- a/drivers/block/DAC960.h +++ b/drivers/block/DAC960.h @@ -2295,7 +2295,6 @@ typedef struct DAC960_Controller unsigned short MaxBlocksPerCommand; unsigned short ControllerScatterGatherLimit; unsigned short DriverScatterGatherLimit; - u64 BounceBufferLimit; unsigned int CombinedStatusBufferLength; unsigned int InitialStatusLength; unsigned int CurrentStatusLength; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 6797e6c23c8a..429ebb84b592 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -159,14 +159,14 @@ static int aoe_debugfs_open(struct inode *inode, struct file *file) return single_open(file, aoedisk_debugfs_show, inode->i_private); } -static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL); -static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL); -static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL); +static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL); +static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL); +static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL); static struct device_attribute dev_attr_firmware_version = { - .attr = { .name = "firmware-version", .mode = S_IRUGO }, + .attr = { .name = 
"firmware-version", .mode = 0444 }, .show = aoedisk_show_fwver, }; -static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL); +static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL); static struct attribute *aoe_attrs[] = { &dev_attr_state.attr, @@ -388,7 +388,6 @@ aoeblk_gdalloc(void *vp) d->aoemajor, d->aoeminor); goto err_mempool; } - blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); spin_lock_irqsave(&d->lock, flags); WARN_ON(!(d->flags & DEVFL_GD_NOW)); diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 540bb60cd071..096882e54095 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1032,8 +1032,9 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt) iter.bi_size = cnt; __bio_for_each_segment(bv, bio, iter, iter) { - char *p = page_address(bv.bv_page) + bv.bv_offset; + char *p = kmap_atomic(bv.bv_page) + bv.bv_offset; skb_copy_bits(skb, soff, p, bv.bv_len); + kunmap_atomic(p); soff += bv.bv_len; } } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 66cb0f857f64..bb976598ee43 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -331,15 +331,15 @@ static const struct block_device_operations brd_fops = { * And now the modules code and kernel interface. */ static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT; -module_param(rd_nr, int, S_IRUGO); +module_param(rd_nr, int, 0444); MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE; -module_param(rd_size, ulong, S_IRUGO); +module_param(rd_size, ulong, 0444); MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); static int max_part = 1; -module_param(max_part, int, S_IRUGO); +module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices"); MODULE_LICENSE("GPL"); @@ -402,6 +402,10 @@ static struct brd_device *brd_alloc(int i) set_capacity(disk, rd_size * 2); disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; + /* Tell the block layer that this is not a rotational device */ + blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); + return brd; out_free_queue: diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 9f4e6f502b84..11a85b740327 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio) bm_page_unlock_io(device, idx); if (ctx->flags & BM_AIO_COPY_PAGES) - mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); + mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool); bio_put(bio); @@ -1014,7 +1014,8 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bm_set_page_unchanged(b->bm_pages[page_nr]); if (ctx->flags & BM_AIO_COPY_PAGES) { - page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM); + page = mempool_alloc(&drbd_md_io_page_pool, + GFP_NOIO | __GFP_HIGHMEM); copy_highpage(page, b->bm_pages[page_nr]); bm_store_page_idx(page, page_nr); } else diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c index ab21976a87b2..5d5e8d6a8a56 100644 --- a/drivers/block/drbd/drbd_debugfs.c +++ b/drivers/block/drbd/drbd_debugfs.c @@ -481,9 +481,9 @@ void drbd_debugfs_resource_add(struct drbd_resource *resource) goto fail; resource->debugfs_res_connections = dentry; - dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP, - resource->debugfs_res, resource, - 
&in_flight_summary_fops); + dentry = debugfs_create_file("in_flight_summary", 0440, + resource->debugfs_res, resource, + &in_flight_summary_fops); if (IS_ERR_OR_NULL(dentry)) goto fail; resource->debugfs_res_in_flight_summary = dentry; @@ -645,16 +645,16 @@ void drbd_debugfs_connection_add(struct drbd_connection *connection) goto fail; connection->debugfs_conn = dentry; - dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP, - connection->debugfs_conn, connection, - &connection_callback_history_fops); + dentry = debugfs_create_file("callback_history", 0440, + connection->debugfs_conn, connection, + &connection_callback_history_fops); if (IS_ERR_OR_NULL(dentry)) goto fail; connection->debugfs_conn_callback_history = dentry; - dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP, - connection->debugfs_conn, connection, - &connection_oldest_requests_fops); + dentry = debugfs_create_file("oldest_requests", 0440, + connection->debugfs_conn, connection, + &connection_oldest_requests_fops); if (IS_ERR_OR_NULL(dentry)) goto fail; connection->debugfs_conn_oldest_requests = dentry; @@ -824,7 +824,7 @@ void drbd_debugfs_device_add(struct drbd_device *device) device->debugfs_minor = dentry; #define DCF(name) do { \ - dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \ + dentry = debugfs_create_file(#name, 0440, \ device->debugfs_vol, device, \ &device_ ## name ## _fops); \ if (IS_ERR_OR_NULL(dentry)) \ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 06ecee1b528e..21b4186add6f 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1405,8 +1405,8 @@ extern struct kmem_cache *drbd_request_cache; extern struct kmem_cache *drbd_ee_cache; /* peer requests */ extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ -extern mempool_t *drbd_request_mempool; -extern mempool_t *drbd_ee_mempool; +extern mempool_t drbd_request_mempool; +extern mempool_t drbd_ee_mempool; /* drbd's page pool, used to buffer data received from the peer, * or data requested by the peer. @@ -1432,16 +1432,16 @@ extern wait_queue_head_t drbd_pp_wait; * 128 should be plenty, currently we probably can get away with as few as 1. 
*/ #define DRBD_MIN_POOL_PAGES 128 -extern mempool_t *drbd_md_io_page_pool; +extern mempool_t drbd_md_io_page_pool; /* We also need to make sure we get a bio * when we need it for housekeeping purposes */ -extern struct bio_set *drbd_md_io_bio_set; +extern struct bio_set drbd_md_io_bio_set; /* to allocate from that set */ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask); /* And a bio_set for cloning */ -extern struct bio_set *drbd_io_bio_set; +extern struct bio_set drbd_io_bio_set; extern struct mutex resources_mutex; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 185f1ef00a7c..a233e71e58ff 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -124,11 +124,11 @@ struct kmem_cache *drbd_request_cache; struct kmem_cache *drbd_ee_cache; /* peer requests */ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ -mempool_t *drbd_request_mempool; -mempool_t *drbd_ee_mempool; -mempool_t *drbd_md_io_page_pool; -struct bio_set *drbd_md_io_bio_set; -struct bio_set *drbd_io_bio_set; +mempool_t drbd_request_mempool; +mempool_t drbd_ee_mempool; +mempool_t drbd_md_io_page_pool; +struct bio_set drbd_md_io_bio_set; +struct bio_set drbd_io_bio_set; /* I do not use a standard mempool, because: 1) I want to hand out the pre-allocated objects first. @@ -153,10 +153,10 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask) { struct bio *bio; - if (!drbd_md_io_bio_set) + if (!bioset_initialized(&drbd_md_io_bio_set)) return bio_alloc(gfp_mask, 1); - bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); + bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set); if (!bio) return NULL; return bio; @@ -2097,16 +2097,11 @@ static void drbd_destroy_mempools(void) /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */ - if (drbd_io_bio_set) - bioset_free(drbd_io_bio_set); - if (drbd_md_io_bio_set) - bioset_free(drbd_md_io_bio_set); - if (drbd_md_io_page_pool) - mempool_destroy(drbd_md_io_page_pool); - if (drbd_ee_mempool) - mempool_destroy(drbd_ee_mempool); - if (drbd_request_mempool) - mempool_destroy(drbd_request_mempool); + bioset_exit(&drbd_io_bio_set); + bioset_exit(&drbd_md_io_bio_set); + mempool_exit(&drbd_md_io_page_pool); + mempool_exit(&drbd_ee_mempool); + mempool_exit(&drbd_request_mempool); if (drbd_ee_cache) kmem_cache_destroy(drbd_ee_cache); if (drbd_request_cache) @@ -2116,11 +2111,6 @@ static void drbd_destroy_mempools(void) if (drbd_al_ext_cache) kmem_cache_destroy(drbd_al_ext_cache); - drbd_io_bio_set = NULL; - drbd_md_io_bio_set = NULL; - drbd_md_io_page_pool = NULL; - drbd_ee_mempool = NULL; - drbd_request_mempool = NULL; drbd_ee_cache = NULL; drbd_request_cache = NULL; drbd_bm_ext_cache = NULL; @@ -2133,18 +2123,7 @@ static int drbd_create_mempools(void) { struct page *page; const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; - int i; - - /* prepare our caches and mempools */ - drbd_request_mempool = NULL; - drbd_ee_cache = NULL; - drbd_request_cache = NULL; - drbd_bm_ext_cache = NULL; - drbd_al_ext_cache = NULL; - drbd_pp_pool = NULL; - drbd_md_io_page_pool = NULL; - drbd_md_io_bio_set = NULL; - drbd_io_bio_set = NULL; + int i, ret; /* caches */ drbd_request_cache = kmem_cache_create( @@ -2168,26 +2147,26 @@ static int drbd_create_mempools(void) goto Enomem; /* mempools */ - drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0); - if (drbd_io_bio_set == NULL) + ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0); + if (ret) goto Enomem; - 
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0, - BIOSET_NEED_BVECS); - if (drbd_md_io_bio_set == NULL) + ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0, + BIOSET_NEED_BVECS); + if (ret) goto Enomem; - drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0); - if (drbd_md_io_page_pool == NULL) + ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0); + if (ret) goto Enomem; - drbd_request_mempool = mempool_create_slab_pool(number, - drbd_request_cache); - if (drbd_request_mempool == NULL) + ret = mempool_init_slab_pool(&drbd_request_mempool, number, + drbd_request_cache); + if (ret) goto Enomem; - drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache); - if (drbd_ee_mempool == NULL) + ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache); + if (ret) goto Enomem; /* drbd's page pool */ @@ -3010,7 +2989,7 @@ static int __init drbd_init(void) goto fail; err = -ENOMEM; - drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); + drbd_proc = proc_create_data("drbd", S_IFREG | 0444 , NULL, &drbd_proc_fops, NULL); if (!drbd_proc) { pr_err("unable to register proc file\n"); goto fail; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index c72dee0ef083..be9450f5ad1c 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -378,7 +378,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto if (drbd_insert_fault(device, DRBD_FAULT_AL_EE)) return NULL; - peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); + peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); if (!peer_req) { if (!(gfp_mask & __GFP_NOWARN)) drbd_err(device, "%s: allocation failed\n", __func__); @@ -409,7 +409,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return peer_req; fail: - mempool_free(peer_req, drbd_ee_mempool); + mempool_free(peer_req, &drbd_ee_mempool); return NULL; } @@ -426,7 +426,7 @@ void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request * peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; drbd_al_complete_io(device, &peer_req->i); } - mempool_free(peer_req, drbd_ee_mempool); + mempool_free(peer_req, &drbd_ee_mempool); } int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index a500e738d929..a47e4987ee46 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -55,7 +55,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio { struct drbd_request *req; - req = mempool_alloc(drbd_request_mempool, GFP_NOIO); + req = mempool_alloc(&drbd_request_mempool, GFP_NOIO); if (!req) return NULL; memset(req, 0, sizeof(*req)); @@ -184,7 +184,7 @@ void drbd_req_destroy(struct kref *kref) } } - mempool_free(req, drbd_request_mempool); + mempool_free(req, &drbd_request_mempool); } static void wake_all_senders(struct drbd_connection *connection) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index cb97b3b30962..94c654020f0f 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -269,7 +269,7 @@ enum drbd_req_state_bits { static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src) { struct bio *bio; - bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set); + bio = bio_clone_fast(bio_src, 
GFP_NOIO, &drbd_io_bio_set); req->private_bio = bio; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8ec7235fc93b..8871b5044d9e 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4450,7 +4450,7 @@ static ssize_t floppy_cmos_show(struct device *dev, return sprintf(buf, "%X\n", UDP->cmos); } -static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); +static DEVICE_ATTR(cmos, 0444, floppy_cmos_show, NULL); static struct attribute *floppy_dev_attrs[] = { &dev_attr_cmos.attr, diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 55cf554bc914..4838b0dbaad3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -732,7 +732,7 @@ static ssize_t loop_attr_do_show_##_name(struct device *d, \ return loop_attr_show(d, b, loop_attr_##_name##_show); \ } \ static struct device_attribute loop_attr_##_name = \ - __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); + __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL); static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) { @@ -809,16 +809,17 @@ static struct attribute_group loop_attribute_group = { .attrs= loop_attrs, }; -static int loop_sysfs_init(struct loop_device *lo) +static void loop_sysfs_init(struct loop_device *lo) { - return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { - sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + if (lo->sysfs_inited) + sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_config_discard(struct loop_device *lo) @@ -1677,9 +1678,9 @@ static const struct block_device_operations lo_fops = { * And now the modules code and kernel interface. 
*/ static int max_loop; -module_param(max_loop, int, S_IRUGO); +module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); -module_param(max_part, int, S_IRUGO); +module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); diff --git a/drivers/block/loop.h b/drivers/block/loop.h index b78de9879f4f..4d42c7af7de7 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -58,6 +58,7 @@ struct loop_device { struct kthread_worker worker; struct task_struct *worker_task; bool use_dio; + bool sysfs_inited; struct request_queue *lo_queue; struct blk_mq_tag_set tag_set; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 769c551e3d71..c73626decb46 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2285,7 +2285,7 @@ static ssize_t mtip_hw_show_status(struct device *dev, return size; } -static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); +static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL); /* debugsfs entries */ @@ -2566,10 +2566,9 @@ static int mtip_hw_debugfs_init(struct driver_data *dd) return -1; } - debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd, - &mtip_flags_fops); - debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd, - &mtip_regs_fops); + debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops); + debugfs_create_file("registers", 0444, dd->dfs_node, dd, + &mtip_regs_fops); return 0; } @@ -2726,15 +2725,11 @@ static void mtip_softirq_done_fn(struct request *rq) blk_mq_end_request(rq, cmd->status); } -static void mtip_abort_cmd(struct request *req, void *data, - bool reserved) +static void mtip_abort_cmd(struct request *req, void *data, bool reserved) { struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); struct driver_data *dd = data; - if (!blk_mq_request_started(req)) - return; - dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag); clear_bit(req->tag, dd->port->cmds_to_issue); @@ -2742,14 +2737,10 @@ static void mtip_abort_cmd(struct request *req, void *data, mtip_softirq_done_fn(req); } -static void mtip_queue_cmd(struct request *req, void *data, - bool reserved) +static void mtip_queue_cmd(struct request *req, void *data, bool reserved) { struct driver_data *dd = data; - if (!blk_mq_request_started(req)) - return; - set_bit(req->tag, dd->port->cmds_to_issue); blk_abort_request(req); } @@ -3720,7 +3711,8 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req, struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); cmd->status = BLK_STS_TIMEOUT; - return BLK_EH_HANDLED; + blk_mq_complete_request(req); + return BLK_EH_DONE; } if (test_bit(req->tag, dd->port->cmds_to_issue)) @@ -3862,7 +3854,6 @@ skip_create_disk: blk_queue_max_hw_sectors(dd->queue, 0xffff); blk_queue_max_segment_size(dd->queue, 0x400000); blk_queue_io_min(dd->queue, 4096); - blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask); /* Signal trim support */ if (dd->trim_supp == true) { @@ -4273,7 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, if (!dd->isr_workq) { dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); rv = -ENOMEM; - goto block_initialize_err; + goto setmask_err; } memset(cpu_list, 0, sizeof(cpu_list)); @@ -4614,7 +4605,7 @@ static int __init mtip_init(void) } if (dfs_parent) { dfs_device_status = debugfs_create_file("device_status", - S_IRUGO, dfs_parent, NULL, + 0444, dfs_parent, NULL, 
&mtip_device_status_fops); if (IS_ERR_OR_NULL(dfs_device_status)) { pr_err("Error creating device_status node\n"); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index afbc202ca6fd..3ed1ef8ee528 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -166,16 +166,19 @@ static ssize_t pid_show(struct device *dev, } static const struct device_attribute pid_attr = { - .attr = { .name = "pid", .mode = S_IRUGO}, + .attr = { .name = "pid", .mode = 0444}, .show = pid_show, }; static void nbd_dev_remove(struct nbd_device *nbd) { struct gendisk *disk = nbd->disk; + struct request_queue *q; + if (disk) { + q = disk->queue; del_gendisk(disk); - blk_cleanup_queue(disk->queue); + blk_cleanup_queue(q); blk_mq_free_tag_set(&nbd->tag_set); disk->private_data = NULL; put_disk(disk); @@ -213,7 +216,15 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, } if (!nsock->dead) { kernel_sock_shutdown(nsock->sock, SHUT_RDWR); - atomic_dec(&nbd->config->live_connections); + if (atomic_dec_return(&nbd->config->live_connections) == 0) { + if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED, + &nbd->config->runtime_flags)) { + set_bit(NBD_DISCONNECTED, + &nbd->config->runtime_flags); + dev_info(nbd_to_dev(nbd), + "Disconnected due to user request.\n"); + } + } } nsock->dead = true; nsock->pending = NULL; @@ -231,9 +242,22 @@ static void nbd_size_clear(struct nbd_device *nbd) static void nbd_size_update(struct nbd_device *nbd) { struct nbd_config *config = nbd->config; + struct block_device *bdev = bdget_disk(nbd->disk, 0); + + if (config->flags & NBD_FLAG_SEND_TRIM) { + nbd->disk->queue->limits.discard_granularity = config->blksize; + blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); + } blk_queue_logical_block_size(nbd->disk->queue, config->blksize); blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); + if (bdev) { + if (bdev->bd_disk) + bd_set_size(bdev, config->bytesize); + else + bdev->bd_invalidated = 1; + bdput(bdev); + } kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); } @@ -243,6 +267,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, struct nbd_config *config = nbd->config; config->blksize = blocksize; config->bytesize = blocksize * nr_blocks; + if (nbd->task_recv != NULL) + nbd_size_update(nbd); } static void nbd_complete_rq(struct request *req) @@ -286,13 +312,15 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, if (!refcount_inc_not_zero(&nbd->config_refs)) { cmd->status = BLK_STS_TIMEOUT; - return BLK_EH_HANDLED; + goto done; } config = nbd->config; if (config->num_connections > 1) { dev_err_ratelimited(nbd_to_dev(nbd), - "Connection timed out, retrying\n"); + "Connection timed out, retrying (%d/%d alive)\n", + atomic_read(&config->live_connections), + config->num_connections); /* * Hooray we have more connections, requeue this IO, the submit * path will put it on a real connection. 
@@ -314,7 +342,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } blk_mq_requeue_request(req, true); nbd_config_put(nbd); - return BLK_EH_NOT_HANDLED; + return BLK_EH_DONE; } } else { dev_err_ratelimited(nbd_to_dev(nbd), @@ -324,8 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, cmd->status = BLK_STS_IOERR; sock_shutdown(nbd); nbd_config_put(nbd); - - return BLK_EH_HANDLED; +done: + blk_mq_complete_request(req); + return BLK_EH_DONE; } /* @@ -647,11 +676,8 @@ static void recv_work(struct work_struct *work) static void nbd_clear_req(struct request *req, void *data, bool reserved) { - struct nbd_cmd *cmd; + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); - if (!blk_mq_request_started(req)) - return; - cmd = blk_mq_rq_to_pdu(req); cmd->status = BLK_STS_IOERR; blk_mq_complete_request(req); } @@ -714,10 +740,9 @@ static int wait_for_reconnect(struct nbd_device *nbd) return 0; if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) return 0; - wait_event_timeout(config->conn_wait, - atomic_read(&config->live_connections), - config->dead_conn_timeout); - return atomic_read(&config->live_connections); + return wait_event_timeout(config->conn_wait, + atomic_read(&config->live_connections) > 0, + config->dead_conn_timeout) > 0; } static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) @@ -950,10 +975,6 @@ static void nbd_bdev_reset(struct block_device *bdev) if (bdev->bd_openers > 1) return; bd_set_size(bdev, 0); - if (max_part > 0) { - blkdev_reread_part(bdev); - bdev->bd_invalidated = 1; - } } static void nbd_parse_flags(struct nbd_device *nbd) @@ -1040,6 +1061,8 @@ static void nbd_config_put(struct nbd_device *nbd) nbd->config = NULL; nbd->tag_set.timeout = 0; + nbd->disk->queue->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); mutex_unlock(&nbd->config_lock); @@ -1109,7 +1132,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b if (ret) return ret; - bd_set_size(bdev, config->bytesize); if (max_part) bdev->bd_invalidated = 1; mutex_unlock(&nbd->config_lock); @@ -1118,7 +1140,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b if (ret) sock_shutdown(nbd); mutex_lock(&nbd->config_lock); - bd_set_size(bdev, 0); + nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) ret = 0; @@ -1269,6 +1291,9 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) refcount_set(&nbd->config_refs, 1); refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); + bdev->bd_invalidated = 1; + } else if (nbd_disconnected(nbd->config)) { + bdev->bd_invalidated = 1; } out: mutex_unlock(&nbd_index_mutex); @@ -1490,8 +1515,8 @@ static int nbd_dev_add(int index) */ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); - disk->queue->limits.discard_granularity = 512; - blk_queue_max_discard_sectors(disk->queue, UINT_MAX); + disk->queue->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(disk->queue, 0); blk_queue_max_segment_size(disk->queue, UINT_MAX); blk_queue_max_segments(disk->queue, USHRT_MAX); blk_queue_max_hw_sectors(disk->queue, 65536); @@ -1755,6 +1780,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) } mutex_lock(&nbd->config_lock); nbd_disconnect(nbd); + nbd_clear_sock(nbd); mutex_unlock(&nbd->config_lock); if 
(test_and_clear_bit(NBD_HAS_CONFIG_REF, &nbd->config->runtime_flags)) @@ -2093,7 +2119,8 @@ static int __init nbd_init(void) if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; recv_workqueue = alloc_workqueue("knbd-recv", - WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + WQ_MEM_RECLAIM | WQ_HIGHPRI | + WQ_UNBOUND, 0); if (!recv_workqueue) return -ENOMEM; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a76553293a31..2bdadd7f1454 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -157,23 +157,23 @@ enum { }; static int g_no_sched; -module_param_named(no_sched, g_no_sched, int, S_IRUGO); +module_param_named(no_sched, g_no_sched, int, 0444); MODULE_PARM_DESC(no_sched, "No io scheduler"); static int g_submit_queues = 1; -module_param_named(submit_queues, g_submit_queues, int, S_IRUGO); +module_param_named(submit_queues, g_submit_queues, int, 0444); MODULE_PARM_DESC(submit_queues, "Number of submission queues"); static int g_home_node = NUMA_NO_NODE; -module_param_named(home_node, g_home_node, int, S_IRUGO); +module_param_named(home_node, g_home_node, int, 0444); MODULE_PARM_DESC(home_node, "Home node for the device"); #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static char g_timeout_str[80]; -module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO); +module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444); static char g_requeue_str[80]; -module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), S_IRUGO); +module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444); #endif static int g_queue_mode = NULL_Q_MQ; @@ -203,27 +203,27 @@ static const struct kernel_param_ops null_queue_mode_param_ops = { .get = param_get_int, }; -device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO); +device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444); MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)"); static int g_gb = 250; -module_param_named(gb, g_gb, int, S_IRUGO); +module_param_named(gb, g_gb, int, 0444); MODULE_PARM_DESC(gb, "Size in GB"); static int g_bs = 512; -module_param_named(bs, g_bs, int, S_IRUGO); +module_param_named(bs, g_bs, int, 0444); MODULE_PARM_DESC(bs, "Block size (in bytes)"); static int nr_devices = 1; -module_param(nr_devices, int, S_IRUGO); +module_param(nr_devices, int, 0444); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); static bool g_blocking; -module_param_named(blocking, g_blocking, bool, S_IRUGO); +module_param_named(blocking, g_blocking, bool, 0444); MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); static bool shared_tags; -module_param(shared_tags, bool, S_IRUGO); +module_param(shared_tags, bool, 0444); MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); static int g_irqmode = NULL_IRQ_SOFTIRQ; @@ -239,19 +239,19 @@ static const struct kernel_param_ops null_irqmode_param_ops = { .get = param_get_int, }; -device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO); +device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444); MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); static unsigned long g_completion_nsec = 10000; -module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO); +module_param_named(completion_nsec, g_completion_nsec, ulong, 0444); MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. 
Default: 10,000ns"); static int g_hw_queue_depth = 64; -module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO); +module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); static bool g_use_per_node_hctx; -module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO); +module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); static struct nullb_device *null_alloc_dev(void); @@ -1365,7 +1365,8 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq) { pr_info("null: rq %p timed out\n", rq); - return BLK_EH_HANDLED; + blk_mq_complete_request(rq); + return BLK_EH_DONE; } static int null_rq_prep_fn(struct request_queue *q, struct request *req) @@ -1427,7 +1428,8 @@ static void null_request_fn(struct request_queue *q) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { pr_info("null: rq %p timed out\n", rq); - return BLK_EH_HANDLED; + blk_mq_complete_request(rq); + return BLK_EH_DONE; } static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 27a44b97393a..8961b190e256 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -740,7 +740,7 @@ static int pd_special_command(struct pd_unit *disk, { struct request *rq; - rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index c61d20c9f3f8..1a2c0101cfcb 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -97,8 +97,8 @@ static int pktdev_major; static int write_congestion_on = PKT_WRITE_CONGESTION_ON; static int write_congestion_off = PKT_WRITE_CONGESTION_OFF; static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ -static mempool_t *psd_pool; -static struct bio_set *pkt_bio_set; +static mempool_t psd_pool; +static struct bio_set pkt_bio_set; static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */ static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */ @@ -478,8 +478,8 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) if (!pd->dfs_d_root) return; - pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, - pd->dfs_d_root, pd, &debug_fops); + pd->dfs_f_info = debugfs_create_file("info", 0444, + pd->dfs_d_root, pd, &debug_fops); } static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) @@ -631,7 +631,7 @@ static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) { rb_erase(&node->rb_node, &pd->bio_queue); - mempool_free(node, pd->rb_pool); + mempool_free(node, &pd->rb_pool); pd->bio_queue_size--; BUG_ON(pd->bio_queue_size < 0); } @@ -704,13 +704,13 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * int ret = 0; rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? 
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM); + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); if (IS_ERR(rq)) return PTR_ERR(rq); if (cgc->buflen) { ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, - __GFP_RECLAIM); + GFP_NOIO); if (ret) goto out; } @@ -1285,7 +1285,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) * Fill-in bvec with data from orig_bios. */ spin_lock(&pkt->lock); - bio_copy_data(pkt->w_bio, pkt->orig_bios.head); + bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); spin_unlock(&pkt->lock); @@ -2303,14 +2303,14 @@ static void pkt_end_io_read_cloned(struct bio *bio) psd->bio->bi_status = bio->bi_status; bio_put(bio); bio_endio(psd->bio); - mempool_free(psd, psd_pool); + mempool_free(psd, &psd_pool); pkt_bio_finished(pd); } static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) { - struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, pkt_bio_set); - struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO); + struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set); + struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO); psd->pd = pd; psd->bio = bio; @@ -2381,7 +2381,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) /* * No matching packet found. Store the bio in the work queue. */ - node = mempool_alloc(pd->rb_pool, GFP_NOIO); + node = mempool_alloc(&pd->rb_pool, GFP_NOIO); node->bio = bio; spin_lock(&pd->lock); BUG_ON(pd->bio_queue_size < 0); @@ -2451,7 +2451,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) split = bio_split(bio, last_zone - bio->bi_iter.bi_sector, - GFP_NOIO, pkt_bio_set); + GFP_NOIO, &pkt_bio_set); bio_chain(split, bio); } else { split = bio; @@ -2707,9 +2707,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) if (!pd) goto out_mutex; - pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, - sizeof(struct pkt_rb_node)); - if (!pd->rb_pool) + ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, + sizeof(struct pkt_rb_node)); + if (ret) goto out_mem; INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); @@ -2766,7 +2766,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) out_mem2: put_disk(disk); out_mem: - mempool_destroy(pd->rb_pool); + mempool_exit(&pd->rb_pool); kfree(pd); out_mutex: mutex_unlock(&ctl_mutex); @@ -2817,7 +2817,7 @@ static int pkt_remove_dev(dev_t pkt_dev) blk_cleanup_queue(pd->disk->queue); put_disk(pd->disk); - mempool_destroy(pd->rb_pool); + mempool_exit(&pd->rb_pool); kfree(pd); /* This is safe: open() is still holding a reference. 
*/ @@ -2914,14 +2914,14 @@ static int __init pkt_init(void) mutex_init(&ctl_mutex); - psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE, - sizeof(struct packet_stacked_data)); - if (!psd_pool) - return -ENOMEM; - pkt_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0); - if (!pkt_bio_set) { - mempool_destroy(psd_pool); - return -ENOMEM; + ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE, + sizeof(struct packet_stacked_data)); + if (ret) + return ret; + ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0); + if (ret) { + mempool_exit(&psd_pool); + return ret; } ret = register_blkdev(pktdev_major, DRIVER_NAME); @@ -2954,8 +2954,8 @@ out_misc: out: unregister_blkdev(pktdev_major, DRIVER_NAME); out2: - mempool_destroy(psd_pool); - bioset_free(pkt_bio_set); + mempool_exit(&psd_pool); + bioset_exit(&pkt_bio_set); return ret; } @@ -2968,8 +2968,8 @@ static void __exit pkt_exit(void) pkt_sysfs_cleanup(); unregister_blkdev(pktdev_major, DRIVER_NAME); - mempool_destroy(psd_pool); - bioset_free(pkt_bio_set); + mempool_exit(&psd_pool); + bioset_exit(&pkt_bio_set); } MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 075662f2cf46..afe1508d82c6 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -465,8 +465,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) priv->queue = queue; queue->queuedata = dev; - blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); - blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 33b36fea1d73..af354047ac4b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -424,7 +424,7 @@ static struct workqueue_struct *rbd_wq; * single-major requires >= 0.75 version of userspace rbd utility. 
*/ static bool single_major = true; -module_param(single_major, bool, S_IRUGO); +module_param(single_major, bool, 0444); MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); static ssize_t rbd_add(struct bus_type *bus, const char *buf, @@ -468,11 +468,11 @@ static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); } -static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); -static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); -static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); -static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); -static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL); +static BUS_ATTR(add, 0200, NULL, rbd_add); +static BUS_ATTR(remove, 0200, NULL, rbd_remove); +static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major); +static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major); +static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL); static struct attribute *rbd_bus_attrs[] = { &bus_attr_add.attr, @@ -4204,22 +4204,22 @@ static ssize_t rbd_image_refresh(struct device *dev, return size; } -static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); -static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL); -static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); -static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL); -static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL); -static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); -static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL); -static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL); -static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); -static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL); -static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); -static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); -static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); -static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); -static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); -static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL); +static DEVICE_ATTR(size, 0444, rbd_size_show, NULL); +static DEVICE_ATTR(features, 0444, rbd_features_show, NULL); +static DEVICE_ATTR(major, 0444, rbd_major_show, NULL); +static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL); +static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL); +static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL); +static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL); +static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL); +static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL); +static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL); +static DEVICE_ATTR(name, 0444, rbd_name_show, NULL); +static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL); +static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh); +static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL); +static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL); +static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 34997df132e2..09537bee387f 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -247,19 +247,19 @@ static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card) if 
(IS_ERR_OR_NULL(card->debugfs_dir)) goto failed_debugfs_dir; - debugfs_stats = debugfs_create_file("stats", S_IRUGO, + debugfs_stats = debugfs_create_file("stats", 0444, card->debugfs_dir, card, &debugfs_stats_fops); if (IS_ERR_OR_NULL(debugfs_stats)) goto failed_debugfs_stats; - debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO, + debugfs_pci_regs = debugfs_create_file("pci_regs", 0444, card->debugfs_dir, card, &debugfs_pci_regs_fops); if (IS_ERR_OR_NULL(debugfs_pci_regs)) goto failed_debugfs_pci_regs; - debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR, + debugfs_cram = debugfs_create_file("cram", 0644, card->debugfs_dir, card, &debugfs_cram_fops); if (IS_ERR_OR_NULL(debugfs_cram)) diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 08586dc14e85..4d90e5eba2f5 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host) if (!crq) return NULL; - rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL); + rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, 0); if (IS_ERR(rq)) { spin_lock_irqsave(&host->lock, flags); carm_put_request(host, crq); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4a07593c2efd..23752dc99b00 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -298,7 +298,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) struct request *req; int err; - req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL); + req = blk_get_request(q, REQ_OP_DRV_IN, 0); if (IS_ERR(req)) return PTR_ERR(req); @@ -371,7 +371,7 @@ static ssize_t virtblk_serial_show(struct device *dev, return err; } -static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); +static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL); /* The queue's logical block size must be set before calling this */ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) @@ -576,10 +576,10 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, } static const struct device_attribute dev_attr_cache_type_ro = - __ATTR(cache_type, S_IRUGO, + __ATTR(cache_type, 0444, virtblk_cache_type_show, NULL); static const struct device_attribute dev_attr_cache_type_rw = - __ATTR(cache_type, S_IRUGO|S_IWUSR, + __ATTR(cache_type, 0644, virtblk_cache_type_show, virtblk_cache_type_store); static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq, diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 987d665e82de..b55b245e8052 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -98,7 +98,7 @@ MODULE_PARM_DESC(max_queues, * backend, 4KB page granularity is used. 
*/ unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER; -module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO); +module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); /* * The LRU mechanism to clean the lists of persistent grants needs to diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 21c1be1eb226..66412eededda 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -367,7 +367,7 @@ int __init xen_blkif_interface_init(void) out: \ return sprintf(buf, format, result); \ } \ - static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) + static DEVICE_ATTR(name, 0444, show_##name, NULL) VBD_SHOW_ALLRING(oo_req, "%llu\n"); VBD_SHOW_ALLRING(rd_req, "%llu\n"); @@ -403,7 +403,7 @@ static const struct attribute_group xen_vbdstat_group = { \ return sprintf(buf, format, ##args); \ } \ - static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) + static DEVICE_ATTR(name, 0444, show_##name, NULL) VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor); VBD_SHOW(mode, "%s\n", be->mode); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 2a8e7813bd1a..ae00a82f350b 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -129,13 +129,12 @@ static const struct block_device_operations xlvbd_block_fops; */ static unsigned int xen_blkif_max_segments = 32; -module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, - S_IRUGO); +module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444); MODULE_PARM_DESC(max_indirect_segments, "Maximum amount of segments in indirect requests (default is 32)"); static unsigned int xen_blkif_max_queues = 4; -module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO); +module_param_named(max_queues, xen_blkif_max_queues, uint, 0444); MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk"); /* @@ -143,7 +142,7 @@ MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per v * backend, 4KB page granularity is used. */ static unsigned int xen_blkif_max_ring_order; -module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO); +module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); #define BLK_RING_SIZE(info) \ |
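
One consequence of the bounce-buffer cleanup is visible in the aoe hunks above: aoeblk.c stops calling blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH), so the driver may now be handed highmem pages, and aoecmd.c accordingly maps each segment with kmap_atomic() instead of relying on page_address(). A small hypothetical helper showing the same pattern; my_copy_into_bio() is invented:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Copy len bytes of payload into the segments of a bio, working for
 * both lowmem and highmem pages. */
static void my_copy_into_bio(struct bio *bio, const char *src, unsigned int len)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int off = 0;

	bio_for_each_segment(bv, bio, iter) {
		unsigned int n = min(bv.bv_len, len - off);
		char *p = kmap_atomic(bv.bv_page);

		memcpy(p + bv.bv_offset, src + off, n);
		kunmap_atomic(p);
		off += n;
		if (off == len)
			break;
	}
}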
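
A large share of the remaining churn above is Joe Perches' symbolic-to-octal permission conversion, which changes spelling only: the octal literals are the same values the S_I* macros expand to. Since those macros carry the standard POSIX mode bits, the identities can be checked with a plain userspace C file (illustrative only, not kernel code):

/* check_modes.c - build with: cc -std=c11 -c check_modes.c */
#include <sys/stat.h>

_Static_assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444, "S_IRUGO");
_Static_assert(S_IWUSR == 0200, "write, owner only");
_Static_assert((S_IRUSR | S_IWUSR) == 0600, "read/write, owner only");
_Static_assert((S_IRUSR | S_IRGRP) == 0440, "read, owner and group");
_Static_assert((S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) == 0644, "rw-r--r--");

So, for example, the DAC960 user_command entry goes from S_IWUSR | S_IRUSR to 0600, and the drbd debugfs files created with S_IRUSR | S_IRGRP become 0440.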