Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c         |  4
-rw-r--r--  block/blk-wbt.c        |  4
-rw-r--r--  block/kyber-iosched.c  | 16
3 files changed, 12 insertions, 12 deletions
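
All three files change for the same reason: the kernel-wide rename of wait_queue_t to wait_queue_entry_t, together with the rename of the entry's embedded list node (task_list -> entry) and of the wait-queue head's list node (task_list -> head). The edits are mechanical; no behavior changes. For orientation, a before/after sketch of the types involved, assuming the linux/wait.h layout of that era:

	/* Before (sketch): both the entry and the head called their
	 * list node "task_list", which was ambiguous. */
	struct __wait_queue {
		unsigned int		flags;
		void			*private;
		wait_queue_func_t	func;
		struct list_head	task_list;
	};
	typedef struct __wait_queue wait_queue_t;

	/* After (sketch): one waiter is a wait_queue_entry (list node
	 * "entry"); the queue it hangs off is a wait_queue_head (list
	 * node "head"). */
	struct wait_queue_entry {
		unsigned int		flags;
		void			*private;
		wait_queue_func_t	func;
		struct list_head	entry;
	};
	typedef struct wait_queue_entry wait_queue_entry_t;
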
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 958cedaff8b8..07b0a03c46e6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -926,14 +926,14 @@ static bool reorder_tags_to_front(struct list_head *list)
return first != NULL;
}
-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
void *key)
{
struct blk_mq_hw_ctx *hctx;
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
- list_del(&wait->task_list);
+ list_del(&wait->entry);
clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
blk_mq_run_hw_queue(hctx, true);
return 1;
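
blk_mq_dispatch_wake() is a custom wake callback: the wait_queue_entry_t is embedded in struct blk_mq_hw_ctx, so container_of() recovers the hardware context, the entry unlinks itself, and the queue is re-run. A minimal sketch of that callback idiom, with hypothetical names (my_ctx, my_wake, my_kick) standing in for the blk-mq ones:

	#include <linux/wait.h>

	struct my_ctx {
		wait_queue_entry_t	wait;	/* embedded wait entry */
		/* ... */
	};

	static void my_kick(struct my_ctx *ctx)
	{
		/* resume the work that stalled, e.g. re-run a dispatch queue */
	}

	static int my_wake(wait_queue_entry_t *wait, unsigned mode,
			   int flags, void *key)
	{
		struct my_ctx *ctx = container_of(wait, struct my_ctx, wait);

		list_del(&wait->entry);		/* unlink from the wait queue */
		my_kick(ctx);
		return 1;			/* one waiter handled */
	}

	static void my_wait_for_resource(struct my_ctx *ctx,
					 struct wait_queue_head *wq_head)
	{
		init_waitqueue_func_entry(&ctx->wait, my_wake);
		add_wait_queue(wq_head, &ctx->wait);
	}

Note that blk-mq pairs the plain list_del() with a separate state bit (BLK_MQ_S_TAG_WAITING) to record whether it is queued; kyber, below, lets the entry itself record that via list_del_init().
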
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 17676f4d7fd1..6a9a0f03a67b 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
}
static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
- wait_queue_t *wait, unsigned long rw)
+ wait_queue_entry_t *wait, unsigned long rw)
{
/*
* inc it here even if disabled, since we'll dec it at completion.
@@ -520,7 +520,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
* in line to be woken up, wait for our turn.
*/
if (waitqueue_active(&rqw->wait) &&
- rqw->wait.task_list.next != &wait->task_list)
+ rqw->wait.head.next != &wait->entry)
return false;
return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
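
The condition above is a FIFO fairness check, and it shows why the rename touched two different members: rqw->wait is a wait_queue_head_t whose list node is now "head", while wait is a wait_queue_entry_t whose list node is now "entry". Exclusive waiters queue at the tail, so wq_head->head.next points at the oldest waiter; may_queue() lets a caller proceed only if nobody is queued or its own entry is that oldest one. Isolated into a hypothetical helper:

	#include <linux/wait.h>

	/*
	 * Hypothetical helper: proceed only if the queue is empty or
	 * our entry is at the front (head.next is the oldest waiter).
	 */
	static inline bool first_in_line(struct wait_queue_head *wq_head,
					 struct wait_queue_entry *wait)
	{
		return !waitqueue_active(wq_head) ||
		       wq_head->head.next == &wait->entry;
	}
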
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b9faabc75fdb..9bf1484365b2 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -99,7 +99,7 @@ struct kyber_hctx_data {
struct list_head rqs[KYBER_NUM_DOMAINS];
unsigned int cur_domain;
unsigned int batching;
- wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+ wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
atomic_t wait_index[KYBER_NUM_DOMAINS];
};
@@ -385,7 +385,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
INIT_LIST_HEAD(&khd->rqs[i]);
- INIT_LIST_HEAD(&khd->domain_wait[i].task_list);
+ INIT_LIST_HEAD(&khd->domain_wait[i].entry);
atomic_set(&khd->wait_index[i], 0);
}
@@ -507,12 +507,12 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
}
}
-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
void *key)
{
struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
- list_del_init(&wait->task_list);
+ list_del_init(&wait->entry);
blk_mq_run_hw_queue(hctx, true);
return 1;
}
@@ -523,7 +523,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
{
unsigned int sched_domain = khd->cur_domain;
struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
- wait_queue_t *wait = &khd->domain_wait[sched_domain];
+ wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
struct sbq_wait_state *ws;
int nr;
@@ -536,7 +536,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
* run when one becomes available. Note that this is serialized on
* khd->lock, but we still need to be careful about the waker.
*/
- if (list_empty_careful(&wait->task_list)) {
+ if (list_empty_careful(&wait->entry)) {
init_waitqueue_func_entry(wait, kyber_domain_wake);
wait->private = hctx;
ws = sbq_wait_ptr(domain_tokens,
@@ -734,9 +734,9 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{ \
struct blk_mq_hw_ctx *hctx = data; \
struct kyber_hctx_data *khd = hctx->sched_data; \
- wait_queue_t *wait = &khd->domain_wait[domain]; \
+ wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
\
- seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
+ seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
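
Throughout kyber the wait entry doubles as its own "am I queued?" flag: INIT_LIST_HEAD() at init time and list_del_init() in the wake callback leave the node self-linked, and list_empty_careful() tests that without taking the queue lock (the "careful" variant checks both link pointers, guarding the window where a waker is concurrently unlinking the entry). The debugfs attribute above simply reports that state. A sketch of the idiom, with hypothetical names:

	#include <linux/wait.h>

	static int my_wake(struct wait_queue_entry *wait, unsigned mode,
			   int flags, void *key)
	{
		list_del_init(&wait->entry);	/* self-linked again: "not queued" */
		return 1;
	}

	static void my_wait_once(struct wait_queue_head *wq_head,
				 struct wait_queue_entry *wait, void *priv)
	{
		/* Lock-free "already queued?" test on the sentinel state. */
		if (!list_empty_careful(&wait->entry))
			return;

		init_waitqueue_func_entry(wait, my_wake);
		wait->private = priv;	/* stashed for the callback, as kyber does */
		add_wait_queue(wq_head, wait);
	}

	/* Callers must start with INIT_LIST_HEAD(&wait->entry), as
	 * kyber_init_hctx() does above. */
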