author     Christoph Hellwig <hch@lst.de>            2022-04-20 07:27:17 +0300
committer  Jens Axboe <axboe@kernel.dk>              2022-05-02 23:06:20 +0300
commit     bbb1ebe7a909db4de49777fb7676d5bf293f34c9 (patch)
tree       0cff6de0224344909db8e482e9167d93b94f8c2a /block
parent     f4a6a61cb6d40d9ae63e47743d33200f3efe3fe7 (diff)
download   linux-bbb1ebe7a909db4de49777fb7676d5bf293f34c9.tar.xz
blk-cgroup: replace bio_blkcg with bio_blkcg_css
All callers of bio_blkcg actually want the CSS, so replace it with an
interface that returns the CSS directly. This in turn allows moving
struct blkcg_gq to block/blk-cgroup.h instead of exposing it in a
public header.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220420042723.1010598-10-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
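
As a sketch of the call-site conversion this enables, distilled from the
bio_associate_blkg() hunk in the diff below (this is illustration only, not
additional code from the patch):

	/* before: callers dereferenced struct blkcg to reach the CSS,
	 * which required the struct definition in a public header */
	css = &bio_blkcg(bio)->css;

	/* after: the helper returns the CSS directly, or NULL if the bio
	 * has no blkg associated yet */
	css = bio_blkcg_css(bio);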
Diffstat (limited to 'block')
-rw-r--r--   block/blk-cgroup.c   18
-rw-r--r--   block/blk-cgroup.h   67
2 files changed, 81 insertions, 4 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bb52797c02bd..8e32cc494808 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -156,6 +156,22 @@ static void blkg_async_bio_workfn(struct work_struct *work)
 }
 
 /**
+ * bio_blkcg_css - return the blkcg CSS associated with a bio
+ * @bio: target bio
+ *
+ * This returns the CSS for the blkcg associated with a bio, or %NULL if not
+ * associated. Callers are expected to either handle %NULL or know association
+ * has been done prior to calling this.
+ */
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+	if (!bio || !bio->bi_blkg)
+		return NULL;
+	return &bio->bi_blkg->blkcg->css;
+}
+EXPORT_SYMBOL_GPL(bio_blkcg_css);
+
+/**
  * blkcg_parent - get the parent of a blkcg
  * @blkcg: blkcg of interest
  *
@@ -1938,7 +1954,7 @@ void bio_associate_blkg(struct bio *bio)
 
 	rcu_read_lock();
 	if (bio->bi_blkg)
-		css = &bio_blkcg(bio)->css;
+		css = bio_blkcg_css(bio);
 	else
 		css = blkcg_css();
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index b00fb1169e7c..03405ddf2a7b 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -25,6 +25,64 @@ struct blkg_policy_data;
 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
 
 #ifdef CONFIG_BLK_CGROUP
+
+enum blkg_iostat_type {
+	BLKG_IOSTAT_READ,
+	BLKG_IOSTAT_WRITE,
+	BLKG_IOSTAT_DISCARD,
+
+	BLKG_IOSTAT_NR,
+};
+
+struct blkg_iostat {
+	u64				bytes[BLKG_IOSTAT_NR];
+	u64				ios[BLKG_IOSTAT_NR];
+};
+
+struct blkg_iostat_set {
+	struct u64_stats_sync		sync;
+	struct blkg_iostat		cur;
+	struct blkg_iostat		last;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+	/* Pointer to the associated request_queue */
+	struct request_queue		*q;
+	struct list_head		q_node;
+	struct hlist_node		blkcg_node;
+	struct blkcg			*blkcg;
+
+	/* all non-root blkcg_gq's are guaranteed to have access to parent */
+	struct blkcg_gq			*parent;
+
+	/* reference count */
+	struct percpu_ref		refcnt;
+
+	/* is this blkg online? protected by both blkcg and q locks */
+	bool				online;
+
+	struct blkg_iostat_set __percpu	*iostat_cpu;
+	struct blkg_iostat_set		iostat;
+
+	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
+
+	spinlock_t			async_bio_lock;
+	struct bio_list			async_bios;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
+
+	atomic_t			use_delay;
+	atomic64_t			delay_nsec;
+	atomic64_t			delay_start;
+	u64				last_delay;
+	int				last_use;
+
+	struct rcu_head			rcu_head;
+};
+
 struct blkcg {
 	struct cgroup_subsys_state	css;
 	spinlock_t			lock;
@@ -173,9 +231,9 @@ static inline struct cgroup_subsys_state *blkcg_css(void)
  *
  * In order to avoid priority inversions we sometimes need to issue a bio as if
  * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
+ * blkg. The idea is we do bio_blkcg_css() to look up the actual context for
+ * the bio and attach the appropriate blkg to the bio. Then we call this helper
+ * and if it is true run with the root blkg for that queue and then do any
  * backcharging to the originating cgroup once the io is complete.
  */
 static inline bool bio_issue_as_root_blkg(struct bio *bio)
@@ -464,6 +522,9 @@ struct blkcg_policy_data {
 struct blkcg_policy {
 };
 
+struct blkcg {
+};
+
 #ifdef CONFIG_BLOCK
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
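
As a hedged usage sketch (hypothetical caller, not part of this patch):
because bio_blkcg_css() returns %NULL for a bio with no blkg association, a
caller that cannot assume prior association would fall back to the submitting
task's blkcg CSS via the existing blkcg_css() helper, mirroring what
bio_associate_blkg() does in the hunk above:

	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = bio_blkcg_css(bio);	/* NULL when bio->bi_blkg is not set */
	if (!css)
		css = blkcg_css();	/* current task's blkcg CSS */
	/* ... use css under RCU protection ... */
	rcu_read_unlock();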