author     Song Liu <songliubraving@fb.com>    2016-11-18 02:24:37 +0300
committer  Shaohua Li <shli@fb.com>            2016-11-19 00:25:40 +0300
commit     937621c36e0ea1af2aceeaea412ba3bd80247199 (patch)
tree       f0fdbd7b0912f7d9180a0912ec7e72a5995a6b24 /drivers/md/raid5.h
parent     c757ec95c22036b1cb85c56ede368bf8f6c08658 (diff)
download   linux-937621c36e0ea1af2aceeaea412ba3bd80247199.tar.xz
md/r5cache: move some code to raid5.h
Move some defines and inline functions to raid5.h, so they can be
used in raid5-cache.c.
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md/raid5.h')
-rw-r--r--  drivers/md/raid5.h  77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..ffc13c4d7e63 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -410,6 +410,83 @@ struct disk_info {
 	struct md_rdev	*rdev, *replacement;
 };
 
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES		256
+#define STRIPE_SIZE		PAGE_SIZE
+#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
+#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
+#define IO_THRESHOLD		1
+#define BYPASS_THRESHOLD	1
+#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK		(NR_HASH - 1)
+#define MAX_STRIPE_BATCH	8
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+	int sectors = bio_sectors(bio);
+
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+		return bio->bi_next;
+	else
+		return NULL;
+}
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+						  unsigned int cnt)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	int old, new;
+
+	do {
+		old = atomic_read(segments);
+		new = (old & 0xffff) | (cnt << 16);
+	} while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	atomic_set(segments, cnt);
+}
+
 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
  * This is because we sometimes take all the spinlocks
  * and creating that much locking depth can cause
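
Note on the sizing macros being moved: with 4K pages, STRIPE_SECTORS is
4096>>9 = 8 sectors, and NR_HASH is PAGE_SIZE / sizeof(struct hlist_head),
i.e. 512 buckets on a 64-bit build, where an hlist_head is a single pointer.
Because that quotient is a power of two, HASH_MASK works as a plain bit
mask. A minimal userspace check of the arithmetic; the DEMO_* names are
illustrative stand-ins, not kernel API:

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096
#define DEMO_NR_HASH	(DEMO_PAGE_SIZE / sizeof(void *))	/* hlist_head is one pointer */
#define DEMO_HASH_MASK	(DEMO_NR_HASH - 1)

int main(void)
{
	/* HASH_MASK only works as a mask because NR_HASH is a power of two. */
	assert((DEMO_NR_HASH & (DEMO_NR_HASH - 1)) == 0);
	printf("NR_HASH=%zu, sector 12345 hashes to bucket %zu\n",
	       DEMO_NR_HASH, (size_t)(12345 & DEMO_HASH_MASK));
	return 0;
}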
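
The r5_next_bio() walk can be modeled outside the kernel: follow bi_next
only while the current bio ends strictly inside the stripe+device window,
and stop at the first bio that extends past it, since bi_next beyond that
point may not be valid. A sketch assuming 8-sector stripes; struct demo_bio
is a stand-in for struct bio, not its real layout:

#include <stdio.h>

#define DEMO_STRIPE_SECTORS 8	/* PAGE_SIZE >> 9 with 4K pages */

struct demo_bio {
	unsigned long long bi_sector;	/* first sector covered */
	unsigned int sectors;		/* length in 512-byte sectors */
	struct demo_bio *bi_next;
};

/* Mirrors r5_next_bio(): advance only while the current bio ends
 * inside the window [sector, sector + DEMO_STRIPE_SECTORS). */
static struct demo_bio *demo_next_bio(struct demo_bio *bio,
				      unsigned long long sector)
{
	if (bio->bi_sector + bio->sectors < sector + DEMO_STRIPE_SECTORS)
		return bio->bi_next;
	return NULL;
}

int main(void)
{
	/* Two bios inside the window starting at sector 0, then one that
	 * extends past it: the walk visits it but goes no further. */
	struct demo_bio c = { 6, 16, NULL };
	struct demo_bio b = { 3, 2, &c };
	struct demo_bio a = { 0, 3, &b };
	int n = 0;

	for (struct demo_bio *bio = &a; bio; bio = demo_next_bio(bio, 0))
		n++;
	printf("bios walked before stopping: %d\n", n);	/* prints 3 */
	return 0;
}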
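
The raid5_*_stripes() helpers pack two 16-bit counters into one 32-bit
word: plain atomic inc/dec updates the active count in the low half, while
the processed count in the high half is replaced via a cmpxchg loop so
concurrent changes to the low half are never lost. A standalone sketch of
the same technique using C11 atomics in userspace rather than the kernel's
atomic_t; all demo_* names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint bi_counter;	/* stands in for bio->bi_phys_segments */

/* Upper 16 bits: processed stripes. */
static unsigned int demo_processed_stripes(void)
{
	return (atomic_load(&bi_counter) >> 16) & 0xffff;
}

/* Lower 16 bits: active stripes. Plain inc/dec never carries into the
 * upper half as long as the active count stays below 0xffff. */
static void demo_inc_active(void)
{
	atomic_fetch_add(&bi_counter, 1);
}

static unsigned int demo_dec_active(void)
{
	/* fetch_sub returns the old value; report the new low half. */
	return (atomic_fetch_sub(&bi_counter, 1) - 1) & 0xffff;
}

/* Replace the upper 16 bits without losing concurrent updates to the
 * lower 16, mirroring the atomic_cmpxchg() loop in the patch. */
static void demo_set_processed(unsigned int cnt)
{
	unsigned int old = atomic_load(&bi_counter);
	unsigned int new;

	do {
		new = (old & 0xffff) | (cnt << 16);
	} while (!atomic_compare_exchange_weak(&bi_counter, &old, new));
}

int main(void)
{
	demo_inc_active();
	demo_inc_active();
	demo_set_processed(5);
	printf("active: %u, processed: %u\n",
	       demo_dec_active(), demo_processed_stripes());	/* 1, 5 */
	return 0;
}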