commit 547616979372b65646d691e8dab90e850be582fe
Author:     Andreas Gruenbacher <agruen@linbit.com>
AuthorDate: 2011-05-30 18:15:21 +0400
Commit:     Philipp Reisner <philipp.reisner@linbit.com>
CommitDate: 2014-02-17 19:36:44 +0400
Tree:       e64d5fd706f5ecf30bcf1bdba5fb678978903f9c (drivers/block/drbd/drbd_main.c)
Parent:     a3603a6e3b4f2f0fb5529821134424e2eeec88fd
drbd: Rename struct drbd_conf -> struct drbd_device
sed -i -e 's:\<drbd_conf\>:drbd_device:g'
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
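
The rename is purely mechanical. A minimal sketch of how such a whole-tree substitution is typically applied (the git grep file selection below is an assumption; the commit message records only the sed expression itself):

    git grep -lw drbd_conf -- drivers/block/drbd | \
        xargs sed -i -e 's:\<drbd_conf\>:drbd_device:g'

The \< and \> word-boundary anchors keep the substitution from touching identifiers that merely contain drbd_conf as a substring.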
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
 -rw-r--r--  drivers/block/drbd/drbd_main.c | 158
1 file changed, 79 insertions(+), 79 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2a8e703bd66f..fd7312928073 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -165,7 +165,7 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 /* When checking with sparse, and this is an inline function, sparse will
    give tons of false positives. When this is a real functions sparse works.
  */
-int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+int _get_ldev_if_state(struct drbd_device *mdev, enum drbd_disk_state mins)
 {
         int io_allowed;
@@ -306,7 +306,7 @@ void tl_clear(struct drbd_tconn *tconn)
  * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
  * @mdev:       DRBD device.
  */
-void tl_abort_disk_io(struct drbd_conf *mdev)
+void tl_abort_disk_io(struct drbd_device *mdev)
 {
         struct drbd_tconn *tconn = mdev->tconn;
         struct drbd_request *req, *r;
@@ -495,7 +495,7 @@ char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *tas
 int conn_lowest_minor(struct drbd_tconn *tconn)
 {
-        struct drbd_conf *mdev;
+        struct drbd_device *mdev;
         int vnr = 0, m;
         rcu_read_lock();
@@ -631,7 +631,7 @@ void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
         return p;
 }
-void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
+void *drbd_prepare_command(struct drbd_device *mdev, struct drbd_socket *sock)
 {
         return conn_prepare_command(mdev->tconn, sock);
 }
@@ -680,7 +680,7 @@ int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
         return err;
 }
-int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+int drbd_send_command(struct drbd_device *mdev, struct drbd_socket *sock,
                       enum drbd_packet cmd, unsigned int header_size,
                       void *data, unsigned int size)
 {
@@ -712,7 +712,7 @@ int drbd_send_ping_ack(struct drbd_tconn *tconn)
         return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
 }
-int drbd_send_sync_param(struct drbd_conf *mdev)
+int drbd_send_sync_param(struct drbd_device *mdev)
 {
         struct drbd_socket *sock;
         struct p_rs_param_95 *p;
@@ -822,7 +822,7 @@ int drbd_send_protocol(struct drbd_tconn *tconn)
         return err;
 }
-static int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
+static int _drbd_send_uuids(struct drbd_device *mdev, u64 uuid_flags)
 {
         struct drbd_socket *sock;
         struct p_uuids *p;
@@ -855,17 +855,17 @@ static int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
         return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
 }
-int drbd_send_uuids(struct drbd_conf *mdev)
+int drbd_send_uuids(struct drbd_device *mdev)
 {
         return _drbd_send_uuids(mdev, 0);
 }
-int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
+int drbd_send_uuids_skip_initial_sync(struct drbd_device *mdev)
 {
         return _drbd_send_uuids(mdev, 8);
 }
-void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+void drbd_print_uuids(struct drbd_device *mdev, const char *text)
 {
         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                 u64 *uuid = mdev->ldev->md.uuid;
@@ -883,7 +883,7 @@ void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
         }
 }
-void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_device *mdev)
 {
         struct drbd_socket *sock;
         struct p_rs_uuid *p;
@@ -908,7 +908,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
         }
 }
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
+int drbd_send_sizes(struct drbd_device *mdev, int trigger_reply, enum dds_flags flags)
 {
         struct drbd_socket *sock;
         struct p_sizes *p;
@@ -956,7 +956,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
  * drbd_send_current_state() - Sends the drbd state to the peer
  * @mdev:       DRBD device.
  */
-int drbd_send_current_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_device *mdev)
 {
         struct drbd_socket *sock;
         struct p_state *p;
@@ -979,7 +979,7 @@ int drbd_send_current_state(struct drbd_conf *mdev)
  * between queuing and processing of the after_state_ch work, we still
  * want to send each intermediary state in the order it occurred.
  */
-int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+int drbd_send_state(struct drbd_device *mdev, union drbd_state state)
 {
         struct drbd_socket *sock;
         struct p_state *p;
@@ -992,7 +992,7 @@ int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
         return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
 }
-int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+int drbd_send_state_req(struct drbd_device *mdev, union drbd_state mask, union drbd_state val)
 {
         struct drbd_socket *sock;
         struct p_req_state *p;
@@ -1022,7 +1022,7 @@ int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union d
         return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
 }
-void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+void drbd_send_sr_reply(struct drbd_device *mdev, enum drbd_state_rv retcode)
 {
         struct drbd_socket *sock;
         struct p_req_state_reply *p;
@@ -1066,7 +1066,7 @@ static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
         p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
 }
-static int fill_bitmap_rle_bits(struct drbd_conf *mdev,
+static int fill_bitmap_rle_bits(struct drbd_device *mdev,
                                 struct p_compressed_bm *p,
                                 unsigned int size,
                                 struct bm_xfer_ctx *c)
@@ -1170,7 +1170,7 @@ static int fill_bitmap_rle_bits(struct drbd_conf *mdev,
  * code upon failure.
  */
 static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_device *mdev, struct bm_xfer_ctx *c)
 {
         struct drbd_socket *sock = &mdev->tconn->data;
         unsigned int header_size = drbd_header_size(mdev->tconn);
@@ -1226,7 +1226,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
 }
 /* See the comment at receive_bitmap() */
-static int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_device *mdev)
 {
         struct bm_xfer_ctx c;
         int err;
@@ -1263,7 +1263,7 @@ static int _drbd_send_bitmap(struct drbd_conf *mdev)
         return err == 0;
 }
-int drbd_send_bitmap(struct drbd_conf *mdev)
+int drbd_send_bitmap(struct drbd_device *mdev)
 {
         struct drbd_socket *sock = &mdev->tconn->data;
         int err = -1;
@@ -1300,7 +1300,7 @@ void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
  * @blksize:    size in byte, needs to be in big endian byte order
  * @block_id:   Id, big endian byte order
  */
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+static int _drbd_send_ack(struct drbd_device *mdev, enum drbd_packet cmd,
                           u64 sector, u32 blksize, u64 block_id)
 {
         struct drbd_socket *sock;
@@ -1323,7 +1323,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
 /* dp->sector and dp->block_id already/still in network byte order,
  * data_size is payload size according to dp->head,
  * and may need to be corrected for digest size.
  */
-void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_dp(struct drbd_device *mdev, enum drbd_packet cmd,
                       struct p_data *dp, int data_size)
 {
         if (mdev->tconn->peer_integrity_tfm)
@@ -1332,7 +1332,7 @@ void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
                        dp->block_id);
 }
-void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+void drbd_send_ack_rp(struct drbd_device *mdev, enum drbd_packet cmd,
                       struct p_block_req *rp)
 {
         _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
@@ -1344,7 +1344,7 @@ void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
  * @cmd:        packet command code
  * @peer_req:   peer request
  */
-int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack(struct drbd_device *mdev, enum drbd_packet cmd,
                   struct drbd_peer_request *peer_req)
 {
         return _drbd_send_ack(mdev, cmd,
@@ -1355,7 +1355,7 @@ int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
 /* This function misuses the block_id field to signal if the blocks
  * are is sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_ack_ex(struct drbd_device *mdev, enum drbd_packet cmd,
                      sector_t sector, int blksize, u64 block_id)
 {
         return _drbd_send_ack(mdev, cmd,
@@ -1364,7 +1364,7 @@ int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
                               cpu_to_be64(block_id));
 }
-int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+int drbd_send_drequest(struct drbd_device *mdev, int cmd,
                        sector_t sector, int size, u64 block_id)
 {
         struct drbd_socket *sock;
@@ -1380,7 +1380,7 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
         return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
 }
-int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+int drbd_send_drequest_csum(struct drbd_device *mdev, sector_t sector, int size,
                             void *digest, int digest_size,
                             enum drbd_packet cmd)
 {
         struct drbd_socket *sock;
@@ -1399,7 +1399,7 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
                                  digest, digest_size);
 }
-int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
+int drbd_send_ov_request(struct drbd_device *mdev, sector_t sector, int size)
 {
         struct drbd_socket *sock;
         struct p_block_req *p;
@@ -1469,7 +1469,7 @@ static void drbd_update_congested(struct drbd_tconn *tconn)
  * As a workaround, we disable sendpage on pages
  * with page_count == 0 or PageSlab.
  */
-static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_no_send_page(struct drbd_device *mdev, struct page *page,
                               int offset, size_t size, unsigned msg_flags)
 {
         struct socket *socket;
@@ -1485,7 +1485,7 @@ static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
         return err;
 }
-static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
+static int _drbd_send_page(struct drbd_device *mdev, struct page *page,
                            int offset, size_t size, unsigned msg_flags)
 {
         struct socket *socket = mdev->tconn->data.socket;
@@ -1534,7 +1534,7 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
         return err;
 }
-static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_bio(struct drbd_device *mdev, struct bio *bio)
 {
         struct bio_vec bvec;
         struct bvec_iter iter;
@@ -1553,7 +1553,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
         return 0;
 }
-static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_zc_bio(struct drbd_device *mdev, struct bio *bio)
 {
         struct bio_vec bvec;
         struct bvec_iter iter;
@@ -1571,7 +1571,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
         return 0;
 }
-static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+static int _drbd_send_zc_ee(struct drbd_device *mdev,
                             struct drbd_peer_request *peer_req)
 {
         struct page *page = peer_req->pages;
@@ -1591,7 +1591,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev,
         return 0;
 }
-static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_device *mdev, unsigned long bi_rw)
 {
         if (mdev->tconn->agreed_pro_version >= 95)
                 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
@@ -1605,7 +1605,7 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 /* Used to send write requests
  * R_PRIMARY -> Peer    (P_DATA)
  */
-int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_dblock(struct drbd_device *mdev, struct drbd_request *req)
 {
         struct drbd_socket *sock;
         struct p_data *p;
@@ -1677,7 +1677,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
  *  Peer          -> (diskless) R_PRIMARY   (P_DATA_REPLY)
  *  C_SYNC_SOURCE -> C_SYNC_TARGET          (P_RS_DATA_REPLY)
  */
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+int drbd_send_block(struct drbd_device *mdev, enum drbd_packet cmd,
                     struct drbd_peer_request *peer_req)
 {
         struct drbd_socket *sock;
@@ -1706,7 +1706,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
         return err;
 }
-int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_device *mdev, struct drbd_request *req)
 {
         struct drbd_socket *sock;
         struct p_block_desc *p;
@@ -1827,7 +1827,7 @@ int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
 static int drbd_open(struct block_device *bdev, fmode_t mode)
 {
-        struct drbd_conf *mdev = bdev->bd_disk->private_data;
+        struct drbd_device *mdev = bdev->bd_disk->private_data;
         unsigned long flags;
         int rv = 0;
@@ -1853,13 +1853,13 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 static void drbd_release(struct gendisk *gd, fmode_t mode)
 {
-        struct drbd_conf *mdev = gd->private_data;
+        struct drbd_device *mdev = gd->private_data;
         mutex_lock(&drbd_main_mutex);
         mdev->open_cnt--;
         mutex_unlock(&drbd_main_mutex);
 }
-static void drbd_set_defaults(struct drbd_conf *mdev)
+static void drbd_set_defaults(struct drbd_device *mdev)
 {
         /* Beware! The actual layout differs
          * between big endian and little endian */
@@ -1872,7 +1872,7 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
                 }
         };
 }
-void drbd_init_set_defaults(struct drbd_conf *mdev)
+void drbd_init_set_defaults(struct drbd_device *mdev)
 {
         /* the memset(,0,) did most of this.
          * note: only assignments, no allocation in here */
@@ -1947,7 +1947,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
         mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
 }
-void drbd_mdev_cleanup(struct drbd_conf *mdev)
+void drbd_mdev_cleanup(struct drbd_device *mdev)
 {
         int i;
         if (mdev->tconn->receiver.t_state != NONE)
@@ -2130,7 +2130,7 @@ static struct notifier_block drbd_notifier = {
         .notifier_call = drbd_notify_sys,
 };
-static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_device *mdev)
 {
         int rr;
@@ -2158,7 +2158,7 @@ static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
 /* caution. no locking. */
 void drbd_minor_destroy(struct kref *kref)
 {
-        struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
+        struct drbd_device *mdev = container_of(kref, struct drbd_device, kref);
         struct drbd_tconn *tconn = mdev->tconn;
         del_timer_sync(&mdev->request_timer);
@@ -2217,7 +2217,7 @@ static void do_retry(struct work_struct *ws)
         spin_unlock_irq(&retry->lock);
         list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
-                struct drbd_conf *mdev = req->w.mdev;
+                struct drbd_device *mdev = req->w.mdev;
                 struct bio *bio = req->master_bio;
                 unsigned long start_time = req->start_time;
                 bool expected;
@@ -2277,7 +2277,7 @@ void drbd_restart_request(struct drbd_request *req)
 static void drbd_cleanup(void)
 {
         unsigned int i;
-        struct drbd_conf *mdev;
+        struct drbd_device *mdev;
         struct drbd_tconn *tconn, *tmp;
         unregister_reboot_notifier(&drbd_notifier);
@@ -2331,7 +2331,7 @@ static void drbd_cleanup(void)
  */
 static int drbd_congested(void *congested_data, int bdi_bits)
 {
-        struct drbd_conf *mdev = congested_data;
+        struct drbd_device *mdev = congested_data;
         struct request_queue *q;
         char reason = '-';
         int r = 0;
@@ -2591,7 +2591,7 @@ void conn_destroy(struct kref *kref)
         kfree(tconn);
 }
-static int init_submitter(struct drbd_conf *mdev)
+static int init_submitter(struct drbd_device *mdev)
 {
         /* opencoded create_singlethread_workqueue(),
          * to be able to say "drbd%d", ..., minor */
@@ -2608,7 +2608,7 @@ static int init_submitter(struct drbd_conf *mdev)
 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
 {
-        struct drbd_conf *mdev;
+        struct drbd_device *mdev;
         struct gendisk *disk;
         struct request_queue *q;
         int vnr_got = vnr;
@@ -2620,7 +2620,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
                 return ERR_MINOR_EXISTS;
         /* GFP_KERNEL, we are outside of all write-out paths */
-        mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
+        mdev = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
         if (!mdev)
                 return ERR_NOMEM;
@@ -2843,7 +2843,7 @@ void drbd_free_sock(struct drbd_tconn *tconn)
 void conn_md_sync(struct drbd_tconn *tconn)
 {
-        struct drbd_conf *mdev;
+        struct drbd_device *mdev;
         int vnr;
         rcu_read_lock();
@@ -2882,7 +2882,7 @@ struct meta_data_on_disk {
-void drbd_md_write(struct drbd_conf *mdev, void *b)
+void drbd_md_write(struct drbd_device *mdev, void *b)
 {
         struct meta_data_on_disk *buffer = b;
         sector_t sector;
@@ -2922,7 +2922,7 @@ void drbd_md_write(struct drbd_conf *mdev, void *b)
  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
  * @mdev:       DRBD device.
  */
-void drbd_md_sync(struct drbd_conf *mdev)
+void drbd_md_sync(struct drbd_device *mdev)
 {
         struct meta_data_on_disk *buffer;
@@ -2955,7 +2955,7 @@ out:
         put_ldev(mdev);
 }
-static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+static int check_activity_log_stripe_size(struct drbd_device *mdev,
                 struct meta_data_on_disk *on_disk,
                 struct drbd_md *in_core)
 {
@@ -3000,7 +3000,7 @@ err:
         return -EINVAL;
 }
-static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int check_offsets_and_sizes(struct drbd_device *mdev, struct drbd_backing_dev *bdev)
 {
         sector_t capacity = drbd_get_capacity(bdev->md_bdev);
         struct drbd_md *in_core = &bdev->md;
@@ -3091,7 +3091,7 @@ err:
  * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
  * even before @bdev is assigned to @mdev->ldev.
  */
-int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+int drbd_md_read(struct drbd_device *mdev, struct drbd_backing_dev *bdev)
 {
         struct meta_data_on_disk *buffer;
         u32 magic, flags;
@@ -3196,7 +3196,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
  * timer that ensures that within five seconds you have to call drbd_md_sync().
  */
 #ifdef DEBUG
-void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
+void drbd_md_mark_dirty_(struct drbd_device *mdev, unsigned int line, const char *func)
 {
         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
@@ -3205,14 +3205,14 @@ void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *
         }
 }
 #else
-void drbd_md_mark_dirty(struct drbd_conf *mdev)
+void drbd_md_mark_dirty(struct drbd_device *mdev)
 {
         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
 }
 #endif
-void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_device *mdev) __must_hold(local)
 {
         int i;
@@ -3220,7 +3220,7 @@ void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
 }
-void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local)
 {
         if (idx == UI_CURRENT) {
                 if (mdev->state.role == R_PRIMARY)
@@ -3235,7 +3235,7 @@ void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local
         drbd_md_mark_dirty(mdev);
 }
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void _drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local)
 {
         unsigned long flags;
         spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
@@ -3243,7 +3243,7 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
         spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
 }
-void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void drbd_uuid_set(struct drbd_device *mdev, int idx, u64 val) __must_hold(local)
 {
         unsigned long flags;
         spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
@@ -3262,7 +3262,7 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
  * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
  */
-void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_new_current(struct drbd_device *mdev) __must_hold(local)
 {
         u64 val;
         unsigned long long bm_uuid;
@@ -3284,7 +3284,7 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
         drbd_md_sync(mdev);
 }
-void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
+void drbd_uuid_set_bm(struct drbd_device *mdev, u64 val) __must_hold(local)
 {
         unsigned long flags;
         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
@@ -3313,7 +3313,7 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
 *
  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
  */
-int drbd_bmio_set_n_write(struct drbd_conf *mdev)
+int drbd_bmio_set_n_write(struct drbd_device *mdev)
 {
         int rv = -EIO;
@@ -3341,7 +3341,7 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
 *
  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
  */
-int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
+int drbd_bmio_clear_n_write(struct drbd_device *mdev)
 {
         int rv = -EIO;
@@ -3358,7 +3358,7 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
 static int w_bitmap_io(struct drbd_work *w, int unused)
 {
         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
-        struct drbd_conf *mdev = w->mdev;
+        struct drbd_device *mdev = w->mdev;
         int rv = -EIO;
         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
@@ -3383,7 +3383,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
         return 0;
 }
-void drbd_ldev_destroy(struct drbd_conf *mdev)
+void drbd_ldev_destroy(struct drbd_device *mdev)
 {
         lc_destroy(mdev->resync);
         mdev->resync = NULL;
@@ -3398,7 +3398,7 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
 static int w_go_diskless(struct drbd_work *w, int unused)
 {
-        struct drbd_conf *mdev = w->mdev;
+        struct drbd_device *mdev = w->mdev;
         D_ASSERT(mdev->state.disk == D_FAILED);
         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
@@ -3449,9 +3449,9 @@ static int w_go_diskless(struct drbd_work *w, int unused)
  * called from worker context. It MUST NOT be used while a previous such
  * work is still pending!
  */
-void drbd_queue_bitmap_io(struct drbd_conf *mdev,
-                          int (*io_fn)(struct drbd_conf *),
-                          void (*done)(struct drbd_conf *, int),
+void drbd_queue_bitmap_io(struct drbd_device *mdev,
+                          int (*io_fn)(struct drbd_device *),
+                          void (*done)(struct drbd_device *, int),
                           char *why, enum bm_flag flags)
 {
         D_ASSERT(current == mdev->tconn->worker.task);
@@ -3486,7 +3486,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
  * freezes application IO while that the actual IO operations runs. This
  * functions MAY NOT be called from worker context.
  */
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+int drbd_bitmap_io(struct drbd_device *mdev, int (*io_fn)(struct drbd_device *),
                    char *why, enum bm_flag flags)
 {
         int rv;
@@ -3506,7 +3506,7 @@ int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
         return rv;
 }
-void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_set_flag(struct drbd_device *mdev, int flag) __must_hold(local)
 {
         if ((mdev->ldev->md.flags & flag) != flag) {
                 drbd_md_mark_dirty(mdev);
@@ -3514,7 +3514,7 @@ void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
         }
 }
-void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
+void drbd_md_clear_flag(struct drbd_device *mdev, int flag) __must_hold(local)
 {
         if ((mdev->ldev->md.flags & flag) != 0) {
                 drbd_md_mark_dirty(mdev);
@@ -3528,7 +3528,7 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
 static void md_sync_timer_fn(unsigned long data)
 {
-        struct drbd_conf *mdev = (struct drbd_conf *) data;
+        struct drbd_device *mdev = (struct drbd_device *) data;
         /* must not double-queue! */
         if (list_empty(&mdev->md_sync_work.list))
@@ -3537,7 +3537,7 @@ static void md_sync_timer_fn(unsigned long data)
 static int w_md_sync(struct drbd_work *w, int unused)
 {
-        struct drbd_conf *mdev = w->mdev;
+        struct drbd_device *mdev = w->mdev;
         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
 #ifdef DEBUG
@@ -3624,7 +3624,7 @@ const char *cmdname(enum drbd_packet cmd)
 * @i:          the struct drbd_interval embedded in struct drbd_request or
 *              struct drbd_peer_request
  */
-int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+int drbd_wait_misc(struct drbd_device *mdev, struct drbd_interval *i)
 {
         struct net_conf *nc;
         DEFINE_WAIT(wait);
@@ -3702,7 +3702,7 @@ _drbd_fault_str(unsigned int type) {
 }
 unsigned int
-_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
+_drbd_insert_fault(struct drbd_device *mdev, unsigned int type)
 {
         static struct fault_random_state rrs = {0, 0};