Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  51
1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4519a7c0098c..713a96237a80 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -22,6 +22,7 @@
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
@@ -60,7 +61,7 @@ struct dm_crypt_io {
int error;
sector_t sector;
- struct list_head list;
+ struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request {
@@ -134,7 +135,7 @@ struct crypt_config {
struct task_struct *write_thread;
wait_queue_head_t write_thread_wait;
- struct list_head write_thread_list;
+ struct rb_root write_tree;
char *cipher;
char *cipher_string;
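
The two structure changes above carry the core of the patch: each dm_crypt_io now embeds a struct rb_node and the crypt_config holds a struct rb_root, so a queued write brings its own tree linkage instead of a list_head and no extra allocation is needed. Below is a minimal sketch of that embedding pattern, using hypothetical names (pending_write, pending_write_from_node) rather than the real dm-crypt structures:

#include <linux/rbtree.h>
#include <linux/types.h>

/*
 * Sketch only, hypothetical names: the rb_node is embedded in the queued
 * object itself, exactly as rb_node now lives inside struct dm_crypt_io.
 */
struct pending_write {
    sector_t sector;        /* sort key: starting sector of the write */
    struct rb_node rb_node; /* tree linkage, embedded in the object */
};

/* Recover the containing object from a tree node; rb_entry() is
 * container_of(), which is all crypt_io_from_node() does as well. */
#define pending_write_from_node(node) \
    rb_entry((node), struct pending_write, rb_node)
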
@@ -1169,11 +1170,15 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
generic_make_request(clone);
}
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
static int dmcrypt_write(void *data)
{
struct crypt_config *cc = data;
+ struct dm_crypt_io *io;
+
while (1) {
- struct list_head local_list;
+ struct rb_root write_tree;
struct blk_plug plug;
DECLARE_WAITQUEUE(wait, current);
@@ -1181,7 +1186,7 @@ static int dmcrypt_write(void *data)
spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:
- if (!list_empty(&cc->write_thread_list))
+ if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
__set_current_state(TASK_INTERRUPTIBLE);
@@ -1203,20 +1208,22 @@ continue_locked:
goto continue_locked;
pop_from_list:
- local_list = cc->write_thread_list;
- local_list.next->prev = &local_list;
- local_list.prev->next = &local_list;
- INIT_LIST_HEAD(&cc->write_thread_list);
-
+ write_tree = cc->write_tree;
+ cc->write_tree = RB_ROOT;
spin_unlock_irq(&cc->write_thread_wait.lock);
+ BUG_ON(rb_parent(write_tree.rb_node));
+
+ /*
+ * Note: we cannot walk the tree here with rb_next because
+ * the structures may be freed when kcryptd_io_write is called.
+ */
blk_start_plug(&plug);
do {
- struct dm_crypt_io *io = container_of(local_list.next,
- struct dm_crypt_io, list);
- list_del(&io->list);
+ io = crypt_io_from_node(rb_first(&write_tree));
+ rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
- } while (!list_empty(&local_list));
+ } while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}
return 0;
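
The rewritten write thread detaches the whole tree under the wait-queue lock (write_tree = cc->write_tree; cc->write_tree = RB_ROOT;) and drains it after dropping the lock. The drain is deliberately destructive: as the in-patch comment notes, rb_next() cannot be used because kcryptd_io_write() may free the io, and with it the embedded rb_node, as soon as the clone is submitted, so each pass takes the leftmost (lowest-sector) node with rb_first() and unlinks it with rb_erase() before handing it off. A sketch of that detach-and-drain pattern, reusing the hypothetical pending_write helpers from the previous sketch and a hypothetical submit_one() callback in place of kcryptd_io_write():

#include <linux/rbtree.h>
#include <linux/spinlock.h>

/*
 * Sketch only: steal the tree under the lock, then issue the writes in
 * ascending sector order. submit_one() may free the object it is given,
 * which is why each node is erased before it is submitted.
 */
static void drain_pending(struct rb_root *root, spinlock_t *lock,
                          void (*submit_one)(struct pending_write *))
{
    struct rb_root local;

    spin_lock_irq(lock);
    local = *root;          /* take every queued write at once */
    *root = RB_ROOT;        /* leave an empty tree for new submitters */
    spin_unlock_irq(lock);

    while (!RB_EMPTY_ROOT(&local)) {
        struct pending_write *pw =
            pending_write_from_node(rb_first(&local));

        rb_erase(&pw->rb_node, &local); /* unlink first ...            */
        submit_one(pw);                 /* ... then submit; pw may be freed */
    }
}

Submitting in ascending sector order is what the tree buys over the old FIFO list: writes reach the block layer (under one blk_plug) already sorted, instead of relying solely on the I/O scheduler's limited queue depth to reorder them.
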
@@ -1227,6 +1234,8 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
struct bio *clone = io->ctx.bio_out;
struct crypt_config *cc = io->cc;
unsigned long flags;
+ sector_t sector;
+ struct rb_node **rbp, *parent;
if (unlikely(io->error < 0)) {
crypt_free_buffer_pages(cc, clone);
@@ -1246,7 +1255,19 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
}
spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
- list_add_tail(&io->list, &cc->write_thread_list);
+ rbp = &cc->write_tree.rb_node;
+ parent = NULL;
+ sector = io->sector;
+ while (*rbp) {
+ parent = *rbp;
+ if (sector < crypt_io_from_node(parent)->sector)
+ rbp = &(*rbp)->rb_left;
+ else
+ rbp = &(*rbp)->rb_right;
+ }
+ rb_link_node(&io->rb_node, parent, rbp);
+ rb_insert_color(&io->rb_node, &cc->write_tree);
+
wake_up_locked(&cc->write_thread_wait);
spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}
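
The submit side replaces list_add_tail() with the usual open-coded rbtree insertion: walk down from the root comparing the new io's sector against each node, remember the last parent, attach the new node as a leaf with rb_link_node(), and rebalance with rb_insert_color(). Because the comparison is a strict '<' with a plain else, equal sectors descend to the right, so writes to the same sector are drained in submission order. The same insertion, factored into a hypothetical helper around the pending_write sketch above (the caller is assumed to hold the lock protecting the root, as dm-crypt holds the wait-queue lock here):

/* Sketch only: sorted insert keyed by sector; builds on the earlier
 * pending_write definitions, caller holds the lock protecting *root. */
static void insert_pending(struct rb_root *root, struct pending_write *pw)
{
    struct rb_node **link = &root->rb_node;
    struct rb_node *parent = NULL;

    while (*link) {
        parent = *link;
        if (pw->sector < pending_write_from_node(parent)->sector)
            link = &(*link)->rb_left;
        else
            link = &(*link)->rb_right;      /* ties go right */
    }
    rb_link_node(&pw->rb_node, parent, link);   /* splice in as a leaf */
    rb_insert_color(&pw->rb_node, root);        /* restore rb-tree balance */
}
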
@@ -1836,7 +1857,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
init_waitqueue_head(&cc->write_thread_wait);
- INIT_LIST_HEAD(&cc->write_thread_list);
+ cc->write_tree = RB_ROOT;
cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
if (IS_ERR(cc->write_thread)) {