author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-13 04:28:00 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-13 04:28:00 +0300
commit    | b08fc5277aaa1d8ea15470d38bf36f19dfb0e125
tree      | 1910dc474cb1ede95581dd9faa81a3bebeded0dc /drivers/md
parent    | 4597fcff07044d89c646d0c5d8b42cd976d966a1
parent    | 9d2a789c1db75d0f55b14fa57bec548d94332ad8
download  | linux-b08fc5277aaa1d8ea15470d38bf36f19dfb0e125.tar.xz
Merge tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull more overflow updates from Kees Cook:
"The rest of the overflow changes for v4.18-rc1.
This includes the explicit overflow fixes from Silvio, further
struct_size() conversions from Matthew, and a bug fix from Dan.
But the bulk of it is the treewide conversions to use either the
2-factor argument allocators (e.g. kmalloc(a * b, ...) into
kmalloc_array(a, b, ...)) or the array_size() macros (e.g. vmalloc(a *
b) into vmalloc(array_size(a, b))).
Coccinelle was fighting me on several fronts, so I've done a bunch of
manual whitespace updates in the patches as well.
Summary:
- Error path bug fix for overflow tests (Dan)
- Additional struct_size() conversions (Matthew, Kees)
- Explicitly reported overflow fixes (Silvio, Kees)
- Add missing kvcalloc() function (Kees)
- Treewide conversions of allocators to use either 2-factor argument
variant when available, or array_size() and array3_size() as needed
(Kees)"
* tag 'overflow-v4.18-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (26 commits)
treewide: Use array_size() in f2fs_kvzalloc()
treewide: Use array_size() in f2fs_kzalloc()
treewide: Use array_size() in f2fs_kmalloc()
treewide: Use array_size() in sock_kmalloc()
treewide: Use array_size() in kvzalloc_node()
treewide: Use array_size() in vzalloc_node()
treewide: Use array_size() in vzalloc()
treewide: Use array_size() in vmalloc()
treewide: devm_kzalloc() -> devm_kcalloc()
treewide: devm_kmalloc() -> devm_kmalloc_array()
treewide: kvzalloc() -> kvcalloc()
treewide: kvmalloc() -> kvmalloc_array()
treewide: kzalloc_node() -> kcalloc_node()
treewide: kzalloc() -> kcalloc()
treewide: kmalloc() -> kmalloc_array()
mm: Introduce kvcalloc()
video: uvesafb: Fix integer overflow in allocation
UBIFS: Fix potential integer overflow in allocation
leds: Use struct_size() in allocation
Convert intel uncore to struct_size
...
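[Editor's note] The struct_size() conversions in the shortlog above ("leds: Use struct_size() in allocation", "Convert intel uncore to struct_size") target allocations of a header plus a trailing flexible array. A minimal sketch of that pattern, using a hypothetical structure:

#include <linux/overflow.h>  /* struct_size() */
#include <linux/slab.h>
#include <linux/types.h>

struct item_table {
	unsigned int nr;
	u32 ids[];		/* flexible array member */
};

static struct item_table *alloc_item_table(unsigned int nr)
{
	/*
	 * Old style: kzalloc(sizeof(*t) + nr * sizeof(u32), GFP_KERNEL)
	 * contains an addition and a multiplication that can each
	 * overflow.  struct_size() performs the whole header-plus-array
	 * computation with saturation to SIZE_MAX, so overflow makes the
	 * allocation fail instead of under-allocating.
	 */
	struct item_table *t = kzalloc(struct_size(t, ids, nr), GFP_KERNEL);

	if (t)
		t->nr = nr;
	return t;
}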
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/super.c        | 11
-rw-r--r-- | drivers/md/bcache/sysfs.c        | 3
-rw-r--r-- | drivers/md/dm-cache-policy-smq.c | 4
-rw-r--r-- | drivers/md/dm-crypt.c            | 5
-rw-r--r-- | drivers/md/dm-integrity.c        | 18
-rw-r--r-- | drivers/md/dm-region-hash.c      | 2
-rw-r--r-- | drivers/md/dm-snap.c             | 9
-rw-r--r-- | drivers/md/dm-stats.c            | 4
-rw-r--r-- | drivers/md/dm-switch.c           | 3
-rw-r--r-- | drivers/md/dm-table.c            | 2
-rw-r--r-- | drivers/md/dm-thin.c             | 4
-rw-r--r-- | drivers/md/dm-verity-target.c    | 5
-rw-r--r-- | drivers/md/md-bitmap.c           | 6
-rw-r--r-- | drivers/md/md-cluster.c          | 6
-rw-r--r-- | drivers/md/md-multipath.c        | 3
-rw-r--r-- | drivers/md/raid0.c               | 10
-rw-r--r-- | drivers/md/raid1.c               | 13
-rw-r--r-- | drivers/md/raid10.c              | 15
-rw-r--r-- | drivers/md/raid5.c               | 15
19 files changed, 80 insertions, 58 deletions
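[Editor's note] Several hunks below (bcache/super.c, raid0.c, raid1.c) have three factors in the size computation, so they keep kzalloc()/vzalloc() and wrap the product in array3_size() rather than switching to a 2-factor allocator. A minimal sketch of that case, with hypothetical names:

#include <linux/overflow.h>  /* array3_size() */
#include <linux/slab.h>

static void **alloc_dev_grid(size_t nr_zones, size_t nr_disks)
{
	/*
	 * kcalloc() only takes two factors; with three, the product is
	 * computed by array3_size(), which saturates to SIZE_MAX on
	 * overflow so the allocation fails rather than under-allocating.
	 */
	return kzalloc(array3_size(sizeof(void *), nr_zones, nr_disks),
		       GFP_KERNEL);
}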
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a31e55bcc4e5..fa4058e43202 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1715,7 +1715,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
         iter_size = (sb->bucket_size / sb->block_size + 1) *
                 sizeof(struct btree_iter_set);

-        if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
+        if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
             mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
             mempool_init_kmalloc_pool(&c->bio_meta, 2,
                                 sizeof(struct bbio) + sizeof(struct bio_vec) *
@@ -2041,10 +2041,11 @@ static int cache_alloc(struct cache *ca)
             !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
             !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
             !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
-            !(ca->buckets = vzalloc(sizeof(struct bucket) *
-                                    ca->sb.nbuckets)) ||
-            !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
-                                         2, GFP_KERNEL)) ||
+            !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
+                                               ca->sb.nbuckets))) ||
+            !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
+                                                     prio_buckets(ca), 2),
+                                         GFP_KERNEL)) ||
             !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
                 return -ENOMEM;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 8ccbc8f3b3af..225b15aa0340 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -881,7 +881,8 @@ SHOW(__bch_cache)
         uint16_t q[31], *p, *cached;
         ssize_t ret;

-        cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+        cached = p = vmalloc(array_size(sizeof(uint16_t),
+                                        ca->sb.nbuckets));
         if (!p)
                 return -ENOMEM;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 4ab23d0075f6..1b5b9ad9e492 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -69,7 +69,7 @@ static int space_init(struct entry_space *es, unsigned nr_entries)
                 return 0;
         }

-        es->begin = vzalloc(sizeof(struct entry) * nr_entries);
+        es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
         if (!es->begin)
                 return -ENOMEM;
@@ -588,7 +588,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr
         nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
         ht->hash_bits = __ffs(nr_buckets);

-        ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
+        ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
         if (!ht->buckets)
                 return -ENOMEM;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4939fbc34ff2..b61b069c33af 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1878,8 +1878,9 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
         unsigned i;
         int err;

-        cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
-                                      sizeof(struct crypto_skcipher *), GFP_KERNEL);
+        cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
+                                      sizeof(struct crypto_skcipher *),
+                                      GFP_KERNEL);
         if (!cc->cipher_tfm.tfms)
                 return -ENOMEM;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index fc68c7aaef8e..86438b2f10dd 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2448,7 +2448,9 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
         struct scatterlist **sl;
         unsigned i;

-        sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
+        sl = kvmalloc_array(ic->journal_sections,
+                            sizeof(struct scatterlist *),
+                            GFP_KERNEL | __GFP_ZERO);
         if (!sl)
                 return NULL;
@@ -2464,7 +2466,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
                 n_pages = (end_index - start_index + 1);

-                s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
+                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
+                                   GFP_KERNEL);
                 if (!s) {
                         dm_integrity_free_journal_scatterlist(ic, sl);
                         return NULL;
@@ -2643,7 +2646,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                         goto bad;
                 }

-                sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
+                sg = kvmalloc_array(ic->journal_pages + 1,
+                                    sizeof(struct scatterlist),
+                                    GFP_KERNEL);
                 if (!sg) {
                         *error = "Unable to allocate sg list";
                         r = -ENOMEM;
@@ -2709,7 +2714,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                         r = -ENOMEM;
                         goto bad;
                 }
-                ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
+                ic->sk_requests = kvmalloc_array(ic->journal_sections,
+                                                 sizeof(struct skcipher_request *),
+                                                 GFP_KERNEL | __GFP_ZERO);
                 if (!ic->sk_requests) {
                         *error = "Unable to allocate sk requests";
                         r = -ENOMEM;
@@ -2743,7 +2750,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                 r = -ENOMEM;
                                 goto bad;
                         }
-                        section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
+                        section_req->iv = kmalloc_array(ivsize, 2,
+                                                        GFP_KERNEL);
                         if (!section_req->iv) {
                                 skcipher_request_free(section_req);
                                 *error = "Unable to allocate iv";
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index c832ec398f02..1f760451e6f4 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -203,7 +203,7 @@ struct dm_region_hash *dm_region_hash_create(
         rh->shift = RH_HASH_SHIFT;
         rh->prime = RH_HASH_MULT;

-        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+        rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
         if (!rh->buckets) {
                 DMERR("unable to allocate region hash bucket memory");
                 kfree(rh);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f745404da721..97de7a7334d4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -326,8 +326,8 @@ static int init_origin_hash(void)
 {
         int i;

-        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
-                           GFP_KERNEL);
+        _origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
+                                 GFP_KERNEL);
         if (!_origins) {
                 DMERR("unable to allocate memory for _origins");
                 return -ENOMEM;
@@ -335,8 +335,9 @@ static int init_origin_hash(void)
         for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                 INIT_LIST_HEAD(_origins + i);

-        _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
-                              GFP_KERNEL);
+        _dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
+                                    sizeof(struct list_head),
+                                    GFP_KERNEL);
         if (!_dm_origins) {
                 DMERR("unable to allocate memory for _dm_origins");
                 kfree(_origins);
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 56059fb56e2d..21de30b4e2a1 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -915,7 +915,9 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
                 if (*q == ',')
                         (*n_histogram_entries)++;

-        *histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
+        *histogram_boundaries = kmalloc_array(*n_histogram_entries,
+                                              sizeof(unsigned long long),
+                                              GFP_KERNEL);
         if (!*histogram_boundaries)
                 return -ENOMEM;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 7924a6a33ddc..fae35caf3672 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -114,7 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
                 return -EINVAL;
         }

-        sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t));
+        sctx->region_table = vmalloc(array_size(nr_slots,
+                                                sizeof(region_table_slot_t)));
         if (!sctx->region_table) {
                 ti->error = "Cannot allocate region table";
                 return -ENOMEM;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index caa51dd351b6..938766794c2e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
                 new_size = 8;
                 gfp = GFP_NOIO;
         }
-        argv = kmalloc(new_size * sizeof(*argv), gfp);
+        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
         if (argv) {
                 memcpy(argv, old_argv, *size * sizeof(*argv));
                 *size = new_size;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6cf9c9364103..7945238df1c0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2940,7 +2940,9 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                 goto bad_mapping_pool;
         }

-        pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+        pool->cell_sort_array =
+                vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
+                                   sizeof(*pool->cell_sort_array)));
         if (!pool->cell_sort_array) {
                 *error = "Error allocating cell sort array";
                 err_p = ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index fc893f636a98..12decdbd722d 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -797,8 +797,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
                 return -E2BIG;
         }

-        v->validated_blocks = kvzalloc(BITS_TO_LONGS(v->data_blocks) *
-                                       sizeof(unsigned long), GFP_KERNEL);
+        v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
+                                       sizeof(unsigned long),
+                                       GFP_KERNEL);
         if (!v->validated_blocks) {
                 ti->error = "failed to allocate bitset for check_at_most_once";
                 return -ENOMEM;
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 239c7bb3929b..f983c3fdf204 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -789,8 +789,8 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
         num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
         offset = slot_number * num_pages;

-        store->filemap = kmalloc(sizeof(struct page *)
-                                 * num_pages, GFP_KERNEL);
+        store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
+                                       GFP_KERNEL);
         if (!store->filemap)
                 return -ENOMEM;
@@ -2117,7 +2117,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
         pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

-        new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
+        new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
         ret = -ENOMEM;
         if (!new_bp) {
                 bitmap_file_unmap(&store);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 79bfbc840385..021cbf9ef1bf 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1380,9 +1380,9 @@ static int lock_all_bitmaps(struct mddev *mddev)
         char str[64];
         struct md_cluster_info *cinfo = mddev->cluster_info;

-        cinfo->other_bitmap_lockres = kzalloc((mddev->bitmap_info.nodes - 1) *
-                                              sizeof(struct dlm_lock_resource *),
-                                              GFP_KERNEL);
+        cinfo->other_bitmap_lockres =
+                kcalloc(mddev->bitmap_info.nodes - 1,
+                        sizeof(struct dlm_lock_resource *), GFP_KERNEL);
         if (!cinfo->other_bitmap_lockres) {
                 pr_err("md: can't alloc mem for other bitmap locks\n");
                 return 0;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index f71fcdb9b39c..881487de1e25 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -399,7 +399,8 @@ static int multipath_run (struct mddev *mddev)
         if (!conf)
                 goto out;

-        conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
+        conf->multipaths = kcalloc(mddev->raid_disks,
+                                   sizeof(struct multipath_info),
                                    GFP_KERNEL);
         if (!conf->multipaths)
                 goto out_free_conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 65ae47a02218..ac1cffd2a09b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -159,12 +159,14 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
         }

         err = -ENOMEM;
-        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
-                                   conf->nr_strip_zones, GFP_KERNEL);
+        conf->strip_zone = kcalloc(conf->nr_strip_zones,
+                                   sizeof(struct strip_zone),
+                                   GFP_KERNEL);
         if (!conf->strip_zone)
                 goto abort;
-        conf->devlist = kzalloc(sizeof(struct md_rdev*)*
-                                conf->nr_strip_zones*mddev->raid_disks,
+        conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
+                                            conf->nr_strip_zones,
+                                            mddev->raid_disks),
                                 GFP_KERNEL);
         if (!conf->devlist)
                 goto abort;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0b344d087581..8e05c1092aef 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -126,8 +126,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
         if (!r1_bio)
                 return NULL;

-        rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
-                      gfp_flags);
+        rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
+                            gfp_flags);
         if (!rps)
                 goto out_free_r1bio;
@@ -2936,9 +2936,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
         if (!conf->barrier)
                 goto abort;

-        conf->mirrors = kzalloc(sizeof(struct raid1_info)
-                                * mddev->raid_disks * 2,
-                                GFP_KERNEL);
+        conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+                                            mddev->raid_disks, 2),
+                                GFP_KERNEL);
         if (!conf->mirrors)
                 goto abort;
@@ -3241,7 +3241,8 @@ static int raid1_reshape(struct mddev *mddev)
                 kfree(newpoolinfo);
                 return ret;
         }
-        newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
+        newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
+                                         raid_disks, 2),
                              GFP_KERNEL);
         if (!newmirrors) {
                 kfree(newpoolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1147ae59e3b6..478cf446827f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -175,7 +175,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
                 nalloc_rp = nalloc;
         else
                 nalloc_rp = nalloc * 2;
-        rps = kmalloc(sizeof(struct resync_pages) * nalloc_rp, gfp_flags);
+        rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
         if (!rps)
                 goto out_free_r10bio;
@@ -3688,8 +3688,8 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                 goto out;

         /* FIXME calc properly */
-        conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
-                                                            max(0,-mddev->delta_disks)),
+        conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
+                                sizeof(struct raid10_info),
                                 GFP_KERNEL);
         if (!conf->mirrors)
                 goto out;
@@ -4129,11 +4129,10 @@ static int raid10_check_reshape(struct mddev *mddev)
         conf->mirrors_new = NULL;
         if (mddev->delta_disks > 0) {
                 /* allocate new 'mirrors' list */
-                conf->mirrors_new = kzalloc(
-                        sizeof(struct raid10_info)
-                        *(mddev->raid_disks +
-                          mddev->delta_disks),
-                        GFP_KERNEL);
+                conf->mirrors_new =
+                        kcalloc(mddev->raid_disks + mddev->delta_disks,
+                                sizeof(struct raid10_info),
+                                GFP_KERNEL);
                 if (!conf->mirrors_new)
                         return -ENOMEM;
         }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 73489446bbcb..2031506a0ecd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2396,7 +2396,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
          * is completely stalled, so now is a good time to resize
          * conf->disks and the scribble region
          */
-        ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
+        ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO);
         if (ndisks) {
                 for (i = 0; i < conf->pool_size; i++)
                         ndisks[i] = conf->disks[i];
@@ -6664,9 +6664,9 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt,
         }
         *group_cnt = num_possible_nodes();
         size = sizeof(struct r5worker) * cnt;
-        workers = kzalloc(size * *group_cnt, GFP_NOIO);
-        *worker_groups = kzalloc(sizeof(struct r5worker_group) *
-                                 *group_cnt, GFP_NOIO);
+        workers = kcalloc(size, *group_cnt, GFP_NOIO);
+        *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
+                                 GFP_NOIO);
         if (!*worker_groups || !workers) {
                 kfree(workers);
                 kfree(*worker_groups);
@@ -6894,8 +6894,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                 goto abort;
         INIT_LIST_HEAD(&conf->free_list);
         INIT_LIST_HEAD(&conf->pending_list);
-        conf->pending_data = kzalloc(sizeof(struct r5pending_data) *
-                PENDING_IO_MAX, GFP_KERNEL);
+        conf->pending_data = kcalloc(PENDING_IO_MAX,
+                                     sizeof(struct r5pending_data),
+                                     GFP_KERNEL);
         if (!conf->pending_data)
                 goto abort;
         for (i = 0; i < PENDING_IO_MAX; i++)
@@ -6944,7 +6945,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
         conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
         max_disks = max(conf->raid_disks, conf->previous_raid_disks);

-        conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
+        conf->disks = kcalloc(max_disks, sizeof(struct disk_info),
                               GFP_KERNEL);
         if (!conf->disks)