author     David Howells <dhowells@redhat.com>   2006-11-22 17:57:56 +0300
committer  David Howells <dhowells@redhat.com>   2006-11-22 17:57:56 +0300
commit     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree       1c4c89652c62a75da09f9b9442012007e4ac6250 /fs
parent     65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
download   linux-c4028958b6ecad064b1a6303a6a5906d4fe48d73.tar.xz
WorkStruct: make allyesconfig
Fix up for make allyesconfig.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/mux.c                      | 16
-rw-r--r--  fs/gfs2/glock.c                  |  8
-rw-r--r--  fs/ncpfs/inode.c                 |  8
-rw-r--r--  fs/ncpfs/sock.c                  | 20
-rw-r--r--  fs/nfsd/nfs4state.c              |  7
-rw-r--r--  fs/ocfs2/alloc.c                 |  9
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c     | 10
-rw-r--r--  fs/ocfs2/cluster/quorum.c        |  4
-rw-r--r--  fs/ocfs2/cluster/tcp.c           | 78
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h  |  8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h         |  2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c         |  2
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c       |  5
-rw-r--r--  fs/ocfs2/dlm/userdlm.c           | 10
-rw-r--r--  fs/ocfs2/journal.c               |  7
-rw-r--r--  fs/ocfs2/journal.h               |  2
-rw-r--r--  fs/ocfs2/ocfs2.h                 |  2
-rw-r--r--  fs/ocfs2/super.c                 |  2
-rw-r--r--  fs/reiserfs/journal.c            | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c      | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c       |  9
21 files changed, 134 insertions, 108 deletions
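Every hunk below applies the same mechanical conversion that comes with the workqueue API change in the parent commit: a work callback now receives the struct work_struct pointer itself rather than an opaque void pointer, recovers its containing object with container_of(), and INIT_WORK() therefore loses its third (data) argument; items that are only ever queued with schedule_delayed_work() additionally become struct delayed_work, are initialised with INIT_DELAYED_WORK(), and resolve the object through the embedded .work member. The sketch below is illustrative only; struct foo and its members are invented for the example and do not appear in this patch.

/*
 * Illustrative sketch only: "struct foo", "foo_work_fn" and "foo_setup"
 * are made-up names, not code from this patch.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo {
	struct delayed_work	work;	/* was: struct work_struct */
	int			hits;
};

/* New-style callback: it receives the work_struct, not a void *data. */
static void foo_work_fn(struct work_struct *work)
{
	/* For a delayed_work, the embedded work_struct is the .work member. */
	struct foo *f = container_of(work, struct foo, work.work);

	f->hits++;
}

void foo_setup(struct foo *f)
{
	/* No third "data" argument any more; the object is found by layout. */
	INIT_DELAYED_WORK(&f->work, foo_work_fn);
	schedule_delayed_work(&f->work, HZ);
}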
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c784549..944273c3dbff 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 			  poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
 	m->rbuf = NULL;
 	m->wpos = m->wsize = 0;
 	m->wbuf = NULL;
-	INIT_WORK(&m->rq, v9fs_read_work, m);
-	INIT_WORK(&m->wq, v9fs_write_work, m);
+	INIT_WORK(&m->rq, v9fs_read_work);
+	INIT_WORK(&m->wq, v9fs_write_work);
 	m->wsched = 0;
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
 	struct v9fs_req *req;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, wq);
 
 	if (m->err < 0) {
 		clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
 	struct v9fs_fcall *rcall;
 	char *rbuf;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, rq);
 
 	if (m->err < 0)
 		return;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fae23ff..55f5333dae99 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
 
 struct greedy {
 	struct gfs2_holder gr_gh;
-	struct work_struct gr_work;
+	struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
 	glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-	struct greedy *gr = data;
+	struct greedy *gr = container_of(work, struct greedy, gr_work.work);
 	struct gfs2_holder *gh = &gr->gr_gh;
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
 
 	gfs2_holder_init(gl, 0, 0, gh);
 	set_bit(HIF_GREEDY, &gh->gh_iflags);
-	INIT_WORK(&gr->gr_work, greedy_work, gr);
+	INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
 
 	schedule_delayed_work(&gr->gr_work, time);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef270c9..72dad552aa00 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
 		server->rcv.len = 10;
 		server->rcv.state = 0;
-		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
 		sock->sk->sk_write_space = ncp_tcp_write_space;
 	} else {
-		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
 		server->timeout_tm.data = (unsigned long)server;
 		server->timeout_tm.function = ncpdgram_timeout_call;
 	}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b252ebed..e496d8b65e92 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
 	}
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 	struct socket* sock;
 
 	sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
 	}
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, timeout_tq);
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncpdgram_timeout_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
 	}
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_rcv_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, tx.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_try_send(server);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b6495829f..e431e93ab503 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1829,9 +1829,8 @@ out:
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
 
 __be32
 nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
 	time_t t;
 
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f43bc5f18a35..0b2ad163005e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1205,10 +1205,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
 	return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
 	int status;
-	struct ocfs2_super *osb = data;
+	struct ocfs2_super *osb =
+		container_of(work, struct ocfs2_super,
+			     osb_truncate_log_wq.work);
 
 	mlog_entry_void();
 
@@ -1441,7 +1443,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
 	/* ocfs2_truncate_log_shutdown keys on the existence of
 	 * osb->osb_tl_inode so we don't set any of the osb variables
 	 * until we're sure all is well. */
-	INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+	INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+			  ocfs2_truncate_log_worker);
 	osb->osb_tl_bh = tl_bh;
 	osb->osb_tl_inode = tl_inode;
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3681fe..4cd9a9580456 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@ struct o2hb_region {
 	 * recognizes a node going up and down in one iteration */
 	u64			hr_generation;
 
-	struct work_struct	hr_write_timeout_work;
+	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
 	/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
 	int		wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-	struct o2hb_region *reg = arg;
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_write_timeout_work.work);
 
 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
 	     "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
 		goto out;
 	}
 
-	INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98fbfc15..4705d659fe57 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
 	o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
 	int quorum;
 	int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
 	struct o2quo_state *qs = &o2quo_state;
 
 	spin_lock_init(&qs->qs_lock);
-	INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+	INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa8c8be..9b3209dc0b16 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
 	[O2NET_ERR_DIED]	= -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
-	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-	INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
 	init_timer(&sc->sc_idle_timeout);
 	sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
 		sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-					struct work_struct *work,
+					struct delayed_work *work,
 					int delay)
 {
 	sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
 		sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-					 struct work_struct *work)
+					 struct delayed_work *work)
 {
 	if (cancel_delayed_work(work))
 		sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_shutdown_work);
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
 	sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
 /* this work func is triggerd by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container, sc_rx_work);
 	int ret;
 
 	do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
 
 /* called when a connect completes and after a sock is accepted.  the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_connect_work);
 
 	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
 	      (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_keepalive_work.work);
 
 	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
 	sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
 * having a connect attempt fail, etc.  This centralizes the logic which decides
 * if a connect attempt should be made or if we should give up and all future
 * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_work.work);
 	struct o2net_sock_container *sc = NULL;
 	struct o2nm_node *node = NULL, *mynode = NULL;
 	struct socket *sock = NULL;
 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-	int ret = 0;
+	int ret = 0, stop;
 
 	/* if we're greater we initiate tx, otherwise we accept */
 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
 
 	spin_lock(&nn->nn_lock);
 	/* see if we already have one pending or have given up */
-	if (nn->nn_sc || nn->nn_persistent_error)
-		arg = NULL;
+	stop = (nn->nn_sc || nn->nn_persistent_error);
 	spin_unlock(&nn->nn_lock);
-	if (arg == NULL) /* *shrug*, needed some indicator */
+	if (stop)
 		goto out;
 
 	nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
 	return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_expired.work);
 
 	spin_lock(&nn->nn_lock);
 	if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
 	spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_still_up.work);
 
 	o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
@@ -1644,9 +1653,9 @@ out:
 	return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-	struct socket *sock = arg;
+	struct socket *sock = o2net_listen_sock;
 	while (o2net_accept_one(sock) == 0)
 		cond_resched();
 }
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
 	o2net_listen_sock = sock;
-	INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
 	sock->sk->sk_reuse = 1;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
 		spin_lock_init(&nn->nn_lock);
-		INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-		INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-		INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+		INIT_DELAYED_WORK(&nn->nn_connect_expired,
+				  o2net_connect_expired);
+		INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
 		/* until we see hb from a node we'll return einval */
 		nn->nn_persistent_error = -ENOTCONN;
 		init_waitqueue_head(&nn->nn_sc_wq);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac7d243..daebbd3a2c8c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@ struct o2net_node {
 	 * connect attempt fails and so can be self-arming.  shutdown is
 	 * careful to first mark the nn such that no connects will be attempted
 	 * before canceling delayed connect work and flushing the queue. */
-	struct work_struct		nn_connect_work;
+	struct delayed_work		nn_connect_work;
 	unsigned long			nn_last_connect_attempt;
 
 	/* this is queued as nodes come up and is canceled when a connection is
 	 * established.  this expiring gives up on the node and errors out
 	 * transmits */
-	struct work_struct		nn_connect_expired;
+	struct delayed_work		nn_connect_expired;
 
 	/* after we give up on a socket we wait a while before deciding
 	 * that it is still heartbeating and that we should do some
 	 * quorum work */
-	struct work_struct		nn_still_up;
+	struct delayed_work		nn_still_up;
 };
 
@@ -129,7 +129,7 @@ struct o2net_sock_container {
 	struct work_struct	sc_shutdown_work;
 
 	struct timer_list	sc_idle_timeout;
-	struct work_struct	sc_keepalive_work;
+	struct delayed_work	sc_keepalive_work;
 
 	unsigned		sc_handshake_ok:1;
 
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa968180b072..6b6ff76538c5 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8d1065f8b3bd..637646e6922e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1296,7 +1296,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 
 	spin_lock_init(&dlm->work_lock);
 	INIT_LIST_HEAD(&dlm->work_list);
-	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
 	kref_init(&dlm->dlm_refs);
 	dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7cea38..fb3e2b0817f1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+	struct dlm_ctxt *dlm =
+		container_of(work, struct dlm_ctxt, dispatched_work);
 	LIST_HEAD(tmp_list);
 	struct list_head *iter, *iter2;
 	struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48bbfac6..7d2f578b267d 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
 		BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
 	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
 		user_dlm_grab_inode_ref(lockres);
 
-		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-			  lockres);
+		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
 		queue_work(user_dlm_worker, &lockres->l_work);
 		lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
 	iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
 	int new_level, status;
-	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+	struct user_lock_res *lockres =
+		container_of(work, struct user_lock_res, l_work);
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fd9734def551..d95ee2720e6e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -911,11 +911,12 @@ struct ocfs2_la_recovery_item {
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
 	int ret;
-	struct ocfs2_super *osb = data;
-	struct ocfs2_journal *journal = osb->journal;
+	struct ocfs2_journal *journal =
+		container_of(work, struct ocfs2_journal, j_recovery_work);
+	struct ocfs2_super *osb = journal->j_osb;
 	struct ocfs2_dinode *la_dinode, *tl_dinode;
 	struct ocfs2_la_recovery_item *item;
 	struct list_head *p, *n;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 2f3a6acdac45..5be161a4ad9f 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -172,7 +172,7 @@ static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, in
 }
 
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  * Journal Control:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0462a7f4e21b..9b1bad1d48ec 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -283,7 +283,7 @@ struct ocfs2_super
 	/* Truncate log info */
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
-	struct work_struct		osb_truncate_log_wq;
+	struct delayed_work		osb_truncate_log_wq;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 76b46ebbb10c..9a8089030f55 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 	spin_lock_init(&journal->j_lock);
 	journal->j_trans_id = (unsigned long) 1;
 	INIT_LIST_HEAD(&journal->j_la_cleanups);
-	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
 	journal->j_state = OCFS2_JOURNAL_FREE;
 
 	/* get some pseudo constants for clustersize bits */
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 85ce23268302..cd1bb75ceb24 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
 			       struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
 	if (reiserfs_mounted_fs_count <= 1)
 		commit_wq = create_workqueue("reiserfs");
 
-	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+	journal->j_work_sb = p_s_sb;
 	return 0;
 free_and_return:
 	free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-	struct super_block *p_s_sb = p;
-	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+	struct reiserfs_journal *journal =
+		container_of(work, struct reiserfs_journal, j_work.work);
+	struct super_block *p_s_sb = journal->j_work_sb;
 	struct reiserfs_journal_list *jl;
 	struct list_head *entry;
 
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf1e1f2..8e6b56fc1cad 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
  */
 STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -176,9 +178,10 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 	bhv_vnode_t		*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d3382843698e..eef4a0ba11e9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin(
 
 STATIC void
 xfs_buf_iodone_work(
-	void			*v)
+	struct work_struct	*work)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)v;
+	xfs_buf_t		*bp =
+		container_of(work, xfs_buf_t, b_iodone_work);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
@@ -1017,10 +1018,10 @@ xfs_buf_ioend(
 
 	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			xfs_buf_iodone_work(bp);
+			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
 		up(&bp->b_iodonesema);