author	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-31 22:12:19 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-10-31 22:12:19 +0300
commit	bf699c9bac124f0a095d8ef06f2d6b219300a822 (patch)
tree	de1c4f852b0c68a87eeed25119443eeca8379c66 /crypto
parent	aefba418bfecd1985a08f50a95bd854a119f0153 (diff)
parent	da17bf4306fd3a52e938b121df82a7baa10eb282 (diff)
download	linux-bf699c9bac124f0a095d8ef06f2d6b219300a822.tar.xz
Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md:
async_tx: fix asynchronous raid6 recovery for ddf layouts
async_pq: rename scribble page
async_pq: kill a stray dma_map() call and other cleanups
md/raid6: kill a gcc-4.0.1 'uninitialized variable' warning
raid6/async_tx: handle holes in block list in async_syndrome_val
md/async: don't pass a memory pointer as a page pointer.
md: Fix handling of raid5 array which is being reshaped to fewer devices.
md: fix problems with RAID6 calculations for DDF.
md/raid456: downlevel multicore operations to raid_run_ops
md: drivers/md/unroll.pl replaced with awk analog
md: remove clumsy usage of do_sync_mapping_range from bitmap code
md: raid1/raid10: handle allocation errors during array setup.
md/raid5: initialize conf->device_lock earlier
md/raid1/raid10: add a cond_resched
Revert "md: do not progress the resync process if the stripe was blocked"
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_pq.c	60
-rw-r--r--	crypto/async_tx/async_raid6_recov.c	100
-rw-r--r--	crypto/async_tx/async_xor.c	18
3 files changed, 113 insertions(+), 65 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..6b5cc4fba59f 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 		srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_quiesce(&submit->depend_tx);
 
 	if (!P(blocks, disks)) {
-		P(blocks, disks) = scribble;
+		P(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	if (!Q(blocks, disks)) {
-		Q(blocks, disks) = scribble;
+		Q(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	a = blocks[faila];
 	b = blocks[failb];
 
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..79182dcb91b7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-					  len, DMA_TO_DEVICE);
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+						      len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
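The common thread in all three files is a calling-convention change: a NULL entry in the blocks[] array now denotes a missing (all-zero) source block, replacing the old raid6_empty_zero_page sentinel, while the P and Q destinations stay at blocks[disks-2] and blocks[disks-1]. A minimal caller sketch under the new convention follows; the NDISKS value, the stripe_done() callback, and gen_stripe_syndrome() are illustrative names, not part of this commit.

```c
#include <linux/async_tx.h>
#include <linux/raid/pq.h>

#define NDISKS 8	/* 6 data disks + P + Q (illustrative) */

static void stripe_done(void *ref)
{
	pr_debug("P/Q generation complete for stripe %p\n", ref);
}

/*
 * Generate P/Q for one stripe.  'pages' has NDISKS entries: data
 * blocks first, then the P and Q destination pages.  Data slots with
 * no block stay NULL, which after this merge means "zero block": the
 * synchronous path substitutes raid6_empty_zero_page and the DMA path
 * simply omits the source and its coefficient.
 */
static struct dma_async_tx_descriptor *
gen_stripe_syndrome(struct page **pages, size_t len, void *ref)
{
	struct async_submit_ctl submit;

	/* a NULL scribble is allowed, but then blocks[] may be
	 * overwritten for address conversion, per the updated
	 * async_gen_syndrome() comment above */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
			  stripe_done, ref, NULL);
	return async_gen_syndrome(pages, 0, NDISKS, len, &submit);
}
```

Leaving a hole NULL rather than pointing it at the zero page is what lets do_sync_gen_syndrome() substitute raid6_empty_zero_page locally and lets async_syndrome_val() and the recovery paths skip the source entirely, instead of every caller passing a sentinel that each DMA driver would then have to map.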