author | Andreas Gruenbacher <agruen@linbit.com> | 2011-07-07 16:19:42 +0400
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2014-02-17 19:46:46 +0400
commit | 0500813fe0c9a617ace86d91344e36839050dad6 (patch)
tree | e866ddce790b671cea8dd2034a3de6f08d50f1ff /drivers/block/drbd/drbd_main.c
parent | 3ab706fe52a5cc12b021d7861943581db766a171 (diff)
download | linux-0500813fe0c9a617ace86d91344e36839050dad6.tar.xz
drbd: Move conf_mutex from connection to resource
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
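
The diffstat below is limited to drivers/block/drbd/drbd_main.c, so the accompanying header change is not shown on this page. As a rough illustration of the struct-level effect (a sketch only, not the actual drbd_int.h hunk; the field names are taken from the drbd_main.c diff, and all other members are omitted):

```c
/* Illustrative sketch only -- not the real drbd_int.h hunk, which is outside
 * this diffstat.  Field names come from the drbd_main.c diff below. */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct drbd_resource {
	/* ... */
	struct mutex conf_update;	/* previously connection->conf_update */
	spinlock_t req_lock;		/* previously connection->req_lock */
};

struct drbd_connection {
	struct drbd_resource *resource;	/* lock is now reached as connection->resource->req_lock */
	/* ... conf_update and req_lock removed from here ... */
};
```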
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r-- | drivers/block/drbd/drbd_main.c | 36
1 file changed, 18 insertions, 18 deletions
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 54df98fa2881..fc439605aa69 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -198,7 +198,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
         int expect_epoch = 0;
         int expect_size = 0;

-        spin_lock_irq(&connection->req_lock);
+        spin_lock_irq(&connection->resource->req_lock);

         /* find oldest not yet barrier-acked write request,
          * count writes in its epoch. */
@@ -255,12 +255,12 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                         break;
                 _req_mod(req, BARRIER_ACKED);
         }
-        spin_unlock_irq(&connection->req_lock);
+        spin_unlock_irq(&connection->resource->req_lock);

         return;

 bail:
-        spin_unlock_irq(&connection->req_lock);
+        spin_unlock_irq(&connection->resource->req_lock);
         conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }

@@ -284,9 +284,9 @@ void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)

 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-        spin_lock_irq(&connection->req_lock);
+        spin_lock_irq(&connection->resource->req_lock);
         _tl_restart(connection, what);
-        spin_unlock_irq(&connection->req_lock);
+        spin_unlock_irq(&connection->resource->req_lock);
 }

 /**
@@ -311,7 +311,7 @@ void tl_abort_disk_io(struct drbd_device *device)
         struct drbd_connection *connection = first_peer_device(device)->connection;
         struct drbd_request *req, *r;

-        spin_lock_irq(&connection->req_lock);
+        spin_lock_irq(&connection->resource->req_lock);
         list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
                 if (!(req->rq_state & RQ_LOCAL_PENDING))
                         continue;
@@ -319,7 +319,7 @@ void tl_abort_disk_io(struct drbd_device *device)
                         continue;
                 _req_mod(req, ABORT_DISK_IO);
         }
-        spin_unlock_irq(&connection->req_lock);
+        spin_unlock_irq(&connection->resource->req_lock);
 }

 static int drbd_thread_setup(void *arg)
@@ -1836,7 +1836,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
         int rv = 0;

         mutex_lock(&drbd_main_mutex);
-        spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+        spin_lock_irqsave(&device->resource->req_lock, flags);
         /* to have a stable device->state.role
          * and no race with updating open_cnt */

@@ -1849,7 +1849,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)

         if (!rv)
                 device->open_cnt++;
-        spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+        spin_unlock_irqrestore(&device->resource->req_lock, flags);
         mutex_unlock(&drbd_main_mutex);

         return rv;
@@ -2546,6 +2546,8 @@ struct drbd_resource *drbd_create_resource(const char *name)
         idr_init(&resource->devices);
         INIT_LIST_HEAD(&resource->connections);
         list_add_tail_rcu(&resource->resources, &drbd_resources);
+        mutex_init(&resource->conf_update);
+        spin_lock_init(&resource->req_lock);
         return resource;
 }

@@ -2588,8 +2590,6 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)

         connection->cstate = C_STANDALONE;
         mutex_init(&connection->cstate_mutex);
-        spin_lock_init(&connection->req_lock);
-        mutex_init(&connection->conf_update);
         init_waitqueue_head(&connection->ping_wait);

         idr_init(&connection->peer_devices);
@@ -2720,7 +2720,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
         blk_queue_merge_bvec(q, drbd_merge_bvec);
-        q->queue_lock = &connection->req_lock;
+        q->queue_lock = &resource->req_lock;

         device->md_io_page = alloc_page(GFP_KERNEL);
         if (!device->md_io_page)
@@ -3281,14 +3281,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)

         rv = NO_ERROR;

-        spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_lock_irq(&device->resource->req_lock);
         if (device->state.conn < C_CONNECTED) {
                 unsigned int peer;
                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
                 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
                 device->peer_max_bio_size = peer;
         }
-        spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_unlock_irq(&device->resource->req_lock);

  err:
         drbd_md_put_buffer(device);
@@ -3577,13 +3577,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
         device->bm_io_work.why = why;
         device->bm_io_work.flags = flags;

-        spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_lock_irq(&device->resource->req_lock);
         set_bit(BITMAP_IO, &device->flags);
         if (atomic_read(&device->ap_bio_cnt) == 0) {
                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
                         drbd_queue_work(&first_peer_device(device)->connection->sender_work,
                                         &device->bm_io_work.w);
         }
-        spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_unlock_irq(&device->resource->req_lock);
 }

 /**
@@ -3751,10 +3751,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
         /* Indicate to wake up device->misc_wait on progress.  */
         i->waiting = true;
         prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
-        spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_unlock_irq(&device->resource->req_lock);
         timeout = schedule_timeout(timeout);
         finish_wait(&device->misc_wait, &wait);
-        spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+        spin_lock_irq(&device->resource->req_lock);
         if (!timeout || device->state.conn < C_CONNECTED)
                 return -ETIMEDOUT;
         if (signal_pending(current))
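
For the per-device call sites, the conversion also replaces the indirection through the (single) peer device's connection with the device's own resource pointer. A minimal before/after sketch of that access pattern, distilled from the hunks above:

```c
/* Before this patch: the request lock was reached via the first peer device. */
spin_lock_irq(&first_peer_device(device)->connection->req_lock);
/* ... protected section ... */
spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

/* After this patch: the lock lives in the resource and is reached directly. */
spin_lock_irq(&device->resource->req_lock);
/* ... protected section ... */
spin_unlock_irq(&device->resource->req_lock);
```

Keeping req_lock (and conf_update) in struct drbd_resource gives one lock per resource regardless of how many connections the resource has, presumably in preparation for resources with more than one connection, and it lets device code take the lock without going through first_peer_device().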