author	Christoph Hellwig <hch@lst.de>	2023-02-03 18:03:44 +0300
committer	Jens Axboe <axboe@kernel.dk>	2023-02-03 18:20:04 +0300
commit	0b6f93bdf07e52620f725f721e547408e0d04c9d (patch)
tree	f3af6f21fa7ef66764f8fb9d9d69ecdf42bef90a	/block/blk-cgroup.c
parent	178fa7d49815ea8001f43ade37a22072829fd8ab (diff)
download	linux-0b6f93bdf07e52620f725f721e547408e0d04c9d.tar.xz
blk-cgroup: improve error unwinding in blkg_alloc
Unwind only the previous initialization steps that happened in blkg_alloc
using goto-based unwinding. This avoids the need for the !queue special
case in blkg_free and thus ensures that any blkg seen outside of
blkg_alloc is always fully constructed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230203150400.3199230-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
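For readers unfamiliar with the pattern, the following is a minimal userspace sketch of goto-based error unwinding, not kernel code: each out_* label releases exactly one completed step, in reverse order, so a failure at any point rolls back only what has already succeeded. The names (thing, thing_alloc, the a/b members) are hypothetical and chosen for illustration.

#include <stdlib.h>

struct thing {
	int *a;
	int *b;
};

/*
 * Sketch of the unwinding style used in blkg_alloc: each error label
 * frees exactly one resource acquired before the failing step, so the
 * function never hands out a partially constructed object.
 */
static struct thing *thing_alloc(void)
{
	struct thing *t;

	t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;
	t->a = malloc(sizeof(*t->a));
	if (!t->a)
		goto out_free_thing;
	t->b = malloc(sizeof(*t->b));
	if (!t->b)
		goto out_free_a;
	return t;		/* fully constructed */

out_free_a:
	free(t->a);
out_free_thing:
	free(t);
	return NULL;
}

Because the error path releases only what the function itself set up, a shared destructor such as blkg_free no longer has to guess which fields of a half-built object are valid.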
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	39
1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index cb110fc51940..9df02a6d04d3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -128,22 +128,16 @@ static void blkg_free_workfn(struct work_struct *work)
 	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
 	 * blkcg_deactivate_policy().
 	 */
-	if (q)
-		mutex_lock(&q->blkcg_mutex);
-
+	mutex_lock(&q->blkcg_mutex);
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
-
 	if (blkg->parent)
 		blkg_put(blkg->parent);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&q->blkcg_mutex);
 
-	if (q) {
-		list_del_init(&blkg->q_node);
-		mutex_unlock(&q->blkcg_mutex);
-		blk_put_queue(q);
-	}
-
+	blk_put_queue(q);
 	free_percpu(blkg->iostat_cpu);
 	percpu_ref_exit(&blkg->refcnt);
 	kfree(blkg);
@@ -265,16 +259,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
 	if (!blkg)
 		return NULL;
-
 	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
-		goto err_free;
-
+		goto out_free_blkg;
 	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
 	if (!blkg->iostat_cpu)
-		goto err_free;
-
+		goto out_exit_refcnt;
 	if (!blk_get_queue(disk->queue))
-		goto err_free;
+		goto out_free_iostat;
 
 	blkg->q = disk->queue;
 	INIT_LIST_HEAD(&blkg->q_node);
@@ -299,8 +290,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 		/* alloc per-policy data and attach it to blkg */
 		pd = pol->pd_alloc_fn(gfp_mask, disk->queue, blkcg);
 		if (!pd)
-			goto err_free;
-
+			goto out_free_pds;
 		blkg->pd[i] = pd;
 		pd->blkg = blkg;
 		pd->plid = i;
@@ -309,8 +299,17 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 
 	return blkg;
 
-err_free:
-	blkg_free(blkg);
+out_free_pds:
+	while (--i >= 0)
+		if (blkg->pd[i])
+			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+	blk_put_queue(disk->queue);
+out_free_iostat:
+	free_percpu(blkg->iostat_cpu);
+out_exit_refcnt:
+	percpu_ref_exit(&blkg->refcnt);
+out_free_blkg:
+	kfree(blkg);
 	return NULL;
 }