Diffstat (limited to 'block')

 block/bio.c            |  11
 block/blk-core.c       |   8
 block/blk-flush.c      |  11
 block/blk-lib.c        | 178
 block/blk-map.c        |  47
 block/blk-mq-tag.c     |  17
 block/blk-mq.c         |   5
 block/blk-settings.c   |  50
 block/blk-sysfs.c      |  39
 block/blk-throttle.c   |   5
 block/partitions/efi.c |   4
 block/partitions/ldm.c |  60

 12 files changed, 175 insertions(+), 260 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 807d25e466ec..0e4aa42bc30d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
 	bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
diff --git a/block/blk-core.c b/block/blk-core.c
index b60537b2c35b..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;
 
 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;
 
 	bio->bi_iter.bi_size = len;
@@ -1963,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
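
[Aside: blk_add_request_payload() grows an @offset parameter, so the single payload bvec no longer has to start at offset 0 in the page. A minimal sketch of a hypothetical call site; the 64-byte offset, the lengths, and example_setup_payload() itself are illustrative assumptions, only the four-argument signature comes from this patch.]

    #include <linux/blkdev.h>

    /* Sketch: attach a discard payload that starts part-way into an
     * already-allocated page (previously bv_offset was forced to 0). */
    static void example_setup_payload(struct request *rq, struct page *page)
    {
            int offset = 64;          /* assumed: header precedes payload */
            unsigned int len = 512;   /* assumed payload length */

            blk_add_request_payload(rq, page, offset, len);
    }
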
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
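
[Aside: the flush capabilities move from q->flush_flags into q->queue_flags, so callers snapshot the whole flag word and test individual bits. A sketch of the pattern blk_insert_flush() now uses; example_needs_postflush() is an assumed name, the flag encoding is the one introduced here.]

    #include <linux/blkdev.h>

    /* Sketch: a FUA write needs post-flush emulation when the queue
     * does not advertise native FUA support. */
    static bool example_needs_postflush(struct request_queue *q,
                                        struct request *rq)
    {
            unsigned long fflags = q->queue_flags;  /* may change, cache */

            return !(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA);
    }
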
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf65379556..23d7f301a196 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,82 +9,46 @@
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
+
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	return new;
 }
 
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = *biop;
 	unsigned int granularity;
 	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
 	if (!q)
 		return -ENXIO;
-
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
 		sector_t end_sect, tmp;
 
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
 		/* Make sure bi_size doesn't overflow */
 		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 
-		/*
+		/**
 		 * If splitting a request, and the next starting sector would be
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
+		bio = next_bio(bio, type, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;
 
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
 		/*
 		 * We can loop for a long time in here, if someone does
 		 * full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 */
 		cond_resched();
 	}
-	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	int type = REQ_WRITE | REQ_DISCARD;
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret;
+
+	if (flags & BLKDEV_DISCARD_SECURE)
+		type |= REQ_SECURE;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio) {
+		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+	}
+	blk_finish_plug(&plug);
 
-	if (bb.error)
-		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
 
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
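
[Aside: the bio_batch machinery is gone; next_bio() chains each new bio onto the previous one, and only the last bio of the chain is waited on. The newly exported __blkdev_issue_discard() hands that last bio back to the caller, so stacking drivers can extend the chain before anything completes. A minimal sketch of a caller under that contract; example_discard() and the GFP_KERNEL choice are assumptions, the API calls are the ones exported above.]

    #include <linux/blkdev.h>

    /* Sketch: build a discard chain without blocking, then submit the
     * final bio and wait for the whole chain, mirroring what
     * blkdev_issue_discard() itself now does. */
    static int example_discard(struct block_device *bdev, sector_t sector,
                               sector_t nr_sects)
    {
            int type = REQ_WRITE | REQ_DISCARD;
            struct bio *bio = NULL;
            struct blk_plug plug;
            int ret;

            blk_start_plug(&plug);
            ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                                         type, &bio);
            if (!ret && bio)
                    ret = submit_bio_wait(type, bio); /* waits on the chain */
            blk_finish_plug(&plug);
            return ret;
    }
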
diff --git a/block/blk-map.c b/block/blk-map.c
index a54f0543b956..b9f88b7751fb 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-			     struct iovec *prv, struct iovec *cur)
-{
-	unsigned long prev_end;
-
-	if (!queue_virt_boundary(q))
-		return false;
-
-	if (prv->iov_base == NULL && prv->iov_len == 0)
-		/* prv is not set - don't check */
-		return false;
-
-	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-		prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
 {
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-	bool copy = (q->dma_pad_mask & iter->count) || map_data;
+	bool copy = false;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret;
 
-	if (!iter || !iter->count)
-		return -EINVAL;
-
-	iov_for_each(iov, i, *iter) {
-		unsigned long uaddr = (unsigned long) iov.iov_base;
-
-		if (!iov.iov_len)
-			return -EINVAL;
-
-		/*
-		 * Keep going so we check length of all segments
-		 */
-		if ((uaddr & queue_dma_alignment(q)) ||
-		    iovec_gap_to_prv(q, &prv, &iov))
-			copy = true;
-
-		prv.iov_base = iov.iov_base;
-		prv.iov_len = iov.iov_len;
-	}
+	if (map_data)
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+	else if (queue_virt_boundary(q))
+		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
 	i = *iter;
 	do {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47405cb..56a0c37a3d06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,26 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
 	}
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
 		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
 	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
+
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
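
[Aside: blk_mq_all_tag_busy_iter() becomes static; drivers now iterate every busy request of a whole tag set through the new blk_mq_tagset_busy_iter() export. A sketch of the intended use; the callback body and the example_* names are assumptions, only the iterator and the busy_tag_iter_fn signature come from this patch.]

    #include <linux/blk-mq.h>

    /* Sketch: a driver callback run once per busy request, e.g. to
     * fail outstanding I/O during a controller reset. */
    static void example_cancel_request(struct request *req, void *data,
                                       bool reserved)
    {
            /* e.g. blk_mq_complete_request(req, -EIO); */
    }

    static void example_cancel_all(struct blk_mq_tag_set *set)
    {
            /* Walks every hardware queue's reserved and normal tags. */
            blk_mq_tagset_busy_iter(set, example_cancel_request, NULL);
    }
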
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf39b78..7df9c9263b21 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
 
-	if (blk_do_io_stat(rq))
-		blk_account_io_start(rq, 1);
+	blk_account_io_start(rq, 1);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		int to_do;
 		void *p;
 
-		while (left < order_to_size(this_order - 1) && this_order)
+		while (this_order && left < order_to_size(this_order - 1))
 			this_order--;
 
 		do {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -820,32 +820,40 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q:		the request queue for the device
- * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q.  If it supports
- * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:		the request queue for the device
+ * @wc:		write back cache on or off
+ * @fua:	device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua)
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
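
[Aside: blk_queue_flush() is removed and drivers describe their cache with blk_queue_write_cache() instead. The old calls map directly: blk_queue_flush(q, REQ_FLUSH) becomes blk_queue_write_cache(q, true, false); blk_queue_flush(q, REQ_FLUSH | REQ_FUA) becomes blk_queue_write_cache(q, true, true); blk_queue_flush(q, 0) becomes blk_queue_write_cache(q, false, false). A sketch of a driver probe path; example_init_queue() and the feature booleans are assumptions.]

    #include <linux/blkdev.h>

    /* Sketch: advertise the device cache during queue setup. Sets or
     * clears QUEUE_FLAG_WC/QUEUE_FLAG_FUA under queue_lock. */
    static void example_init_queue(struct request_queue *q,
                                   bool writeback, bool fua)
    {
            blk_queue_write_cache(q, writeback, fua);
    }
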
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 995b58d46ed1..99205965f559 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..47a3e540631a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
 */
 #define throtl_log(sq, fmt, args...)	do {				\
 	struct throtl_grp *__tg = sq_to_tg((sq));			\
 	struct throtl_data *__td = sq_to_td((sq));			\
									\
 	(void)__td;							\
+	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
+		break;							\
 	if ((__tg)) {							\
 		char __pbuf[128];					\
									\
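
[Aside: the net effect of the blk-sysfs.c hunks is a new attribute at /sys/block/<dev>/queue/write_cache. Reading it reports "write back" or "write through" from QUEUE_FLAG_WC; writing "write back", "write through", or "none" (treated as write through) toggles that flag. Note the store path only flips the block-layer flag, it neither touches QUEUE_FLAG_FUA nor reconfigures the device's cache. The blk-throttle.c hunk resolves the old TODO by bailing out of throtl_log() before formatting names when blktrace message logging is disabled.]
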
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 26cb624ace05..bcd86e5cd546 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -430,7 +430,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 	}
 
 	/* Check that sizeof_partition_entry has the correct value */
 	if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
-		pr_debug("GUID Partitition Entry Size check failed.\n");
+		pr_debug("GUID Partition Entry Size check failed.\n");
 		goto fail;
 	}
 
@@ -443,7 +443,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
				le32_to_cpu((*gpt)->sizeof_partition_entry));
 
 	if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
-		pr_debug("GUID Partitition Entry Array CRC check failed.\n");
+		pr_debug("GUID Partition Entry Array CRC check failed.\n");
 		goto fail_ptes;
 	}
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index e507cfbd044e..edcea70674c9 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -27,6 +27,8 @@
 #include <linux/pagemap.h>
 #include <linux/stringify.h>
 #include <linux/kernel.h>
+#include <linux/uuid.h>
+
 #include "ldm.h"
 #include "check.h"
 #include "msdos.h"
@@ -66,60 +68,6 @@ void _ldm_printk(const char *level, const char *function, const char *fmt, ...)
 }
 
 /**
- * ldm_parse_hexbyte - Convert a ASCII hex number to a byte
- * @src:  Pointer to at least 2 characters to convert.
- *
- * Convert a two character ASCII hex string to a number.
- *
- * Return:  0-255  Success, the byte was parsed correctly
- *         -1     Error, an invalid character was supplied
- */
-static int ldm_parse_hexbyte (const u8 *src)
-{
-	unsigned int x;		/* For correct wrapping */
-	int h;
-
-	/* high part */
-	x = h = hex_to_bin(src[0]);
-	if (h < 0)
-		return -1;
-
-	/* low part */
-	h = hex_to_bin(src[1]);
-	if (h < 0)
-		return -1;
-
-	return (x << 4) + h;
-}
-
-/**
- * ldm_parse_guid - Convert GUID from ASCII to binary
- * @src:   36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
- * @dest:  Memory block to hold binary GUID (16 bytes)
- *
- * N.B. The GUID need not be NULL terminated.
- *
- * Return:  'true'   @dest contains binary GUID
- *          'false'  @dest contents are undefined
- */
-static bool ldm_parse_guid (const u8 *src, u8 *dest)
-{
-	static const int size[] = { 4, 2, 2, 2, 6 };
-	int i, j, v;
-
-	if (src[8]  != '-' || src[13] != '-' ||
-	    src[18] != '-' || src[23] != '-')
-		return false;
-
-	for (j = 0; j < 5; j++, src++)
-		for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
-			if ((v = ldm_parse_hexbyte (src)) < 0)
-				return false;
-
-	return true;
-}
-
-/**
 * ldm_parse_privhead - Read the LDM Database PRIVHEAD structure
 * @data: Raw database PRIVHEAD structure loaded from the device
 * @ph:   In-memory privhead structure in which to return parsed information
@@ -167,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
 		ldm_error("PRIVHEAD disk size doesn't match real disk size");
 		return false;
 	}
-	if (!ldm_parse_guid(data + 0x0030, ph->disk_id)) {
+	if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
 		ldm_error("PRIVHEAD contains an invalid GUID.");
 		return false;
 	}
@@ -944,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
 	disk = &vb->vblk.disk;
 	ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
		      sizeof (disk->alt_name));
-	if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
+	if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
		return false;
 
 	return true;
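
[Aside: the open-coded ldm_parse_hexbyte()/ldm_parse_guid() pair is replaced by the <linux/uuid.h> helper uuid_be_to_bin(), which returns 0 on success and a negative errno on malformed input, hence the dropped '!' at both call sites. A sketch of an equivalent bool-returning wrapper; example_parse_disk_id() is an assumed name, only the helper itself comes from this patch.]

    #include <linux/uuid.h>

    /* Sketch: parse "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" into the
     * 16-byte big-endian disk_id, preserving ldm_parse_guid()'s old
     * true-on-success convention. */
    static bool example_parse_disk_id(const u8 *src, u8 *disk_id)
    {
            return uuid_be_to_bin((const char *)src, (uuid_be *)disk_id) == 0;
    }
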