| author | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2015-04-27 16:32:45 +0300 |
| --- | --- | --- |
| committer | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2015-04-27 16:32:45 +0300 |
| commit | b3e5ced63e051e8f911b795ac5b06229a5328f7b (patch) | |
| tree | e63badb76509839ec948431859255923b6e2a09c /block | |
| parent | e183201b9e917daf2530b637b2f34f1d5afb934d (diff) | |
| parent | b787f68c36d49bb1d9236f403813641efa74a031 (diff) | |
| download | linux-b3e5ced63e051e8f911b795ac5b06229a5328f7b.tar.xz | |
Merge tag 'v4.1-rc1' into patchwork
Linux 4.1-rc1
* tag 'v4.1-rc1': (11651 commits)
Linux 4.1-rc1
x86_64, asm: Work around AMD SYSRET SS descriptor attribute issue
v4l: xilinx: fix for include file movement
platform/chrome: chromeos_laptop - instantiate Atmel at primary address
RCU pathwalk breakage when running into a symlink overmounting something
fix I_DIO_WAKEUP definition
direct-io: only inc/dec inode->i_dio_count for file systems
fs/9p: fix readdir()
Btrfs: prevent list corruption during free space cache processing
toshiba_acpi: Do not register vendor backlight when acpi_video bl is available
x86: fix special __probe_kernel_write() tail zeroing case
crypto: img-hash - CRYPTO_DEV_IMGTEC_HASH should depend on HAS_DMA
crypto: x86/sha512_ssse3 - fixup for asm function prototype change
nios2: rework cache
nios2: Add types.h header required for __u32 type
ALSA: hda - fix headset mic detection problem for one more machine
eth: bf609 eth clock: add pclk clock for stmmac driver probe
blackfin: Wire up missing syscalls
Btrfs: fix inode cache writeout
ACPI / scan: Add a scan handler for PRP0001
...
Diffstat (limited to 'block')
| Mode | Path | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | block/blk-core.c | 19 |
| -rw-r--r-- | block/blk-map.c | 6 |
| -rw-r--r-- | block/blk-merge.c | 2 |
| -rw-r--r-- | block/blk-mq-sysfs.c | 1 |
| -rw-r--r-- | block/blk-mq-tag.c | 6 |
| -rw-r--r-- | block/blk-mq.c | 75 |
| -rw-r--r-- | block/blk-settings.c | 6 |
| -rw-r--r-- | block/scsi_ioctl.c | 12 |
8 files changed, 77 insertions, 50 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 794c3e7f01cf..fd154b94447a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+	int nid = (int)(long)data;
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+	kmem_cache_free(request_cachep, element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-					  mempool_free_slab, request_cachep,
-					  gfp_mask, q->node);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+					  free_request_struct,
+					  (void *)(long)q->node, gfp_mask,
+					  q->node);
 
 	if (!rl->rq_pool)
 		return -ENOMEM;
diff --git a/block/blk-map.c b/block/blk-map.c
index b8d2725324a6..da310a105429 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -124,10 +124,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
 	struct iovec iov;
 	struct iov_iter i;
+	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
 
-	iov.iov_base = ubuf;
-	iov.iov_len = len;
-	iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+	if (unlikely(ret < 0))
+		return ret;
 
 	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
 }
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fc1ff3b1ea1f..fd3fee81c23c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
 		struct bio_vec *bprev;
 
-		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
 		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
 			return false;
 	}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20d5dcf..b79685e06b70 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d53a764b05ea..be3290cc0644 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		/*
 		 * We're out of tags on this hardware queue, kick any
 		 * pending IO submits before going to sleep waiting for
-		 * some to complete.
+		 * some to complete. Note that hctx can be NULL here for
+		 * reserved tag allocation.
 		 */
-		blk_mq_run_hw_queue(hctx, false);
+		if (hctx)
+			blk_mq_run_hw_queue(hctx, false);
 
 		/*
 		 * Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f4bea21052e..ade8a2d1b0aa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -42,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++)
+	for (i = 0; i < hctx->ctx_map.size; i++)
 		if (hctx->ctx_map.map[i].word)
 			return true;
 
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q);
+		blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -728,7 +730,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++) {
+	for (i = 0; i < hctx->ctx_map.size; i++) {
 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
 		unsigned int off, bit;
 
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }
 
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, async);
 	}
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}
@@ -1457,7 +1460,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
 		do {
 			page = alloc_pages_node(set->numa_node,
-				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
 				this_order);
 			if (page)
 				break;
@@ -1479,8 +1482,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
-			tags->rqs[i]->atomic_flags = 0;
-			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 						tags->rqs[i], hctx_idx, i,
@@ -1519,8 +1520,6 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	if (!bitmap->map)
 		return -ENOMEM;
 
-	bitmap->map_size = num_maps;
-
 	total = nr_cpu_ids;
 	for (i = 0; i < num_maps; i++) {
 		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
@@ -1761,8 +1760,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 			continue;
 
 		hctx = q->mq_ops->map_queue(q, i);
-		cpumask_set_cpu(i, hctx->cpumask);
-		hctx->nr_ctx++;
 
 		/*
 		 * Set local node, IFF we have more than one hw queue. If
@@ -1799,6 +1796,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {
+		struct blk_mq_ctxmap *map = &hctx->ctx_map;
+
 		/*
 		 * If no software queues are mapped to this hardware queue,
 		 * disable it and free the request entries.
@@ -1815,6 +1814,13 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		}
 
 		/*
+		 * Set the map size to the number of mapped software queues.
+		 * This is more accurate and more efficient than looping
+		 * over all possibly mapped software queues.
+		 */
+		map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
+
+		/*
 		 * Initialize batch roundrobin counts
 		 */
 		hctx->next_cpu = cpumask_first(hctx->cpumask);
@@ -1891,9 +1897,25 @@ void blk_mq_release(struct request_queue *q)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
+{
 	struct blk_mq_hw_ctx **hctxs;
 	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
 	unsigned int *map;
 	int i;
 
@@ -1928,20 +1950,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		hctxs[i]->queue_num = i;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_map;
+		goto err_hctxs;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
 
 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1985,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	if (set->timeout)
-		blk_queue_rq_timeout(q, set->timeout);
-
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -1981,7 +1996,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_hw;
+		goto err_hctxs;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2008,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_hw:
-	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2022,7 @@ err_percpu:
 	free_percpu(ctx);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2174,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ed2cbe5e8c9..12600bfffca9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 				     b->physical_block_size);
 
 	t->io_min = max(t->io_min, b->io_min);
-	t->io_opt = lcm(t->io_opt, b->io_opt);
+	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
 	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     b->raid_partial_stripes_expensive);
 
 	/* Find lowest common alignment_offset */
-	t->alignment_offset = lcm(t->alignment_offset, alignment)
+	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 						      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
 			t->discard_granularity;
 	}
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e1f71c396193..55b6f15dac90 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -335,16 +335,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		struct iov_iter i;
 		struct iovec *iov = NULL;
 
-		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
-					    0, NULL, &iov);
-		if (ret < 0) {
-			kfree(iov);
+		ret = import_iovec(rq_data_dir(rq),
+				   hdr->dxferp, hdr->iovec_count,
+				   0, &iov, &i);
+		if (ret < 0)
 			goto out_free_cdb;
-		}
 
 		/* SG_IO howto says that the shorter of the two wins */
-		iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
-			      min_t(unsigned, ret, hdr->dxfer_len));
+		iov_iter_truncate(&i, hdr->dxfer_len);
 
 		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 		kfree(iov);
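One of the more self-contained changes in the diff above is the switch from lcm() to lcm_not_zero() in blk_stack_limits(): when one device in a stacked configuration reports an io_opt, alignment_offset or discard_alignment of 0, plain lcm() returns 0 and discards the other device's hint, whereas lcm_not_zero() falls back to whichever value is non-zero. Below is a minimal user-space sketch of that behaviour; the helper bodies mirror the kernel's lib/lcm.c as best I recall and are re-implemented here purely for illustration, and the 4096/65536-byte values are arbitrary example limits, not taken from the commit.

```c
#include <stdio.h>

/* Euclid's algorithm, standing in for the kernel's gcd() helper. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

/* lcm() returns 0 whenever either argument is 0. */
static unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;
	return 0;
}

/* lcm_not_zero() falls back to the non-zero argument instead. */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (a ? a : b);
}

int main(void)
{
	/* Stacking io_opt: one device reports 0, the other 65536 bytes. */
	printf("lcm(0, 65536)             = %lu\n", lcm(0, 65536));
	printf("lcm_not_zero(0, 65536)    = %lu\n", lcm_not_zero(0, 65536));

	/* Both devices report a hint: the two helpers agree. */
	printf("lcm(4096, 65536)          = %lu\n", lcm(4096, 65536));
	printf("lcm_not_zero(4096, 65536) = %lu\n", lcm_not_zero(4096, 65536));
	return 0;
}
```

Expected output is 0 and 65536 for the first pair and 65536 for both of the second, which is the difference the blk-settings.c hunks rely on: a device that does not report a limit no longer zeroes out the stacked value.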