From 721a669b7225edeeb0ca8e2bf71b83882326a71b Mon Sep 17 00:00:00 2001 From: Soeren Sandmann Date: Tue, 15 Sep 2009 14:33:08 +0200 Subject: perf events: Fix swevent hrtimer sampling by keeping track of remaining time when enabling/disabling swevent hrtimers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the hrtimer-based events work for sysprof. Whenever a swevent is scheduled out, the hrtimer is canceled. When it is scheduled back in, the timer is restarted. This happens every scheduler tick, which means the timer never expires because it keeps being restarted with the same period. To fix that, save the remaining time when disabling; when reenabling, use that saved time as the period instead of the user-specified sampling period. Also, move the starting and stopping of the hrtimers to helper functions instead of duplicating the code. Signed-off-by: Søren Sandmann Pedersen LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e6d95f97419..9e7012689a84 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -471,8 +471,8 @@ struct hw_perf_event { unsigned long event_base; int idx; }; - union { /* software */ - atomic64_t count; + struct { /* software */ + s64 remaining; struct hrtimer hrtimer; }; }; -- cgit v1.2.3 From 6d3f1e12f46a2f9a1bb7e7aa433df8dd31ce5647 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 23 Oct 2009 19:36:19 -0400 Subject: tracing: Remove cpu arg from the rb_time_stamp() function The cpu argument is not used inside the rb_time_stamp() function. Plus fix a typo. Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <20091023233647.118547500@goodmis.org> Signed-off-by: Ingo Molnar --- include/linux/trace_seq.h | 2 +- kernel/trace/ring_buffer.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index c134dd1fe6b6..09077f6ed128 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -7,7 +7,7 @@ /* * Trace sequences are used to allow a function to call several other functions - * to create a string of data to use (up to a max of PAGE_SIZE. + * to create a string of data to use (up to a max of PAGE_SIZE).
*/ struct trace_seq { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 217f6991184f..3ffa502fb243 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -483,7 +483,7 @@ struct ring_buffer_iter { /* Up this if you want to test the TIME_EXTENTS and normalization */ #define DEBUG_SHIFT 0 -static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) +static inline u64 rb_time_stamp(struct ring_buffer *buffer) { /* shift to debug/test normalization and TIME_EXTENTS */ return buffer->clock() << DEBUG_SHIFT; @@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) u64 time; preempt_disable_notrace(); - time = rb_time_stamp(buffer, cpu); + time = rb_time_stamp(buffer); preempt_enable_no_resched_notrace(); return time; @@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list) } /* - * rb_is_head_page - test if the give page is the head page + * rb_is_head_page - test if the given page is the head page * * Because the reader may move the head_page pointer, we can * not trust what the head page is (it may be pointing to @@ -1868,7 +1868,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, * Nested commits always have zero deltas, so * just reread the time stamp */ - *ts = rb_time_stamp(buffer, cpu_buffer->cpu); + *ts = rb_time_stamp(buffer); next_page->page->time_stamp = *ts; } @@ -2111,7 +2111,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) goto out_fail; - ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); + ts = rb_time_stamp(cpu_buffer->buffer); /* * Only the first commit can update the timestamp. -- cgit v1.2.3 From 14d18a81b5171d4433e41129619c75748b4f4d26 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 29 Oct 2009 00:10:37 +0000 Subject: net: fix kmemcheck annotations struct sk_buff kmemcheck annotations enlarged this structure by 8/16 bytes. Fix this by moving 'protocol' inside the flags1 bitfield, and queue_mapping inside the flags2 bitfield. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df7b23ac66e6..6aebfceca3eb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -354,8 +354,8 @@ struct sk_buff { ipvs_property:1, peeked:1, nf_trace:1; + __be16 protocol:16; kmemcheck_bitfield_end(flags1); - __be16 protocol; void (*destructor)(struct sk_buff *skb); #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -367,7 +367,6 @@ struct sk_buff { #endif int iif; - __u16 queue_mapping; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ #ifdef CONFIG_NET_CLS_ACT @@ -376,6 +375,7 @@ struct sk_buff { #endif kmemcheck_bitfield_begin(flags2); + __u16 queue_mapping:16; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif -- cgit v1.2.3 From b0c110ca8e89f2c9cd52ec7fb1b98c5b7aa78496 Mon Sep 17 00:00:00 2001 From: jamal Date: Sun, 18 Oct 2009 02:12:33 +0000 Subject: net: Fix RPF to work with policy routing Policy routing is not looked up by mark on reverse path filtering. This fixes it. Signed-off-by: Jamal Hadi Salim Signed-off-by: David S.
Miller --- include/net/ip_fib.h | 3 ++- net/ipv4/fib_frontend.c | 5 ++++- net/ipv4/route.c | 8 ++++---- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ef91fe924ba4..4d22fabc7719 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -210,7 +210,8 @@ extern struct fib_table *fib_get_table(struct net *net, u32 id); extern const struct nla_policy rtm_ipv4_policy[]; extern void ip_fib_init(void); extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, - struct net_device *dev, __be32 *spec_dst, u32 *itag); + struct net_device *dev, __be32 *spec_dst, + u32 *itag, u32 mark); extern void fib_select_default(struct net *net, const struct flowi *flp, struct fib_result *res); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e2f950592566..aa00398be80e 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -229,14 +229,17 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, */ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, - struct net_device *dev, __be32 *spec_dst, u32 *itag) + struct net_device *dev, __be32 *spec_dst, + u32 *itag, u32 mark) { struct in_device *in_dev; struct flowi fl = { .nl_u = { .ip4_u = { .daddr = src, .saddr = dst, .tos = tos } }, + .mark = mark, .iif = oif }; + struct fib_result res; int no_addr, rpf; int ret; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bb4199252026..5b1050a5d874 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1854,7 +1854,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto e_inval; spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); } else if (fib_validate_source(saddr, 0, tos, 0, - dev, &spec_dst, &itag) < 0) + dev, &spec_dst, &itag, 0) < 0) goto e_inval; rth = dst_alloc(&ipv4_dst_ops); @@ -1967,7 +1967,7 @@ static int __mkroute_input(struct sk_buff *skb, err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), - in_dev->dev, &spec_dst, &itag); + in_dev->dev, &spec_dst, &itag, skb->mark); if (err < 0) { ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); @@ -2141,7 +2141,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, int result; result = fib_validate_source(saddr, daddr, tos, net->loopback_dev->ifindex, - dev, &spec_dst, &itag); + dev, &spec_dst, &itag, skb->mark); if (result < 0) goto martian_source; if (result) @@ -2170,7 +2170,7 @@ brd_input: spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); else { err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, - &itag); + &itag, skb->mark); if (err < 0) goto martian_source; if (err) -- cgit v1.2.3 From 9d410c796067686b1e032d54ce475b7055537138 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 30 Oct 2009 05:03:53 +0000 Subject: net: fix sk_forward_alloc corruption On UDP sockets, we must call skb_free_datagram() with socket locked, or risk sk_forward_alloc corruption. This requirement is not respected in SUNRPC. Add a convenient helper, skb_free_datagram_locked() and use it in SUNRPC Reported-by: Francis Moreau Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 2 ++ net/core/datagram.c | 10 +++++++++- net/ipv4/udp.c | 4 +--- net/ipv6/udp.c | 4 +--- net/sunrpc/svcsock.c | 10 +++++----- 5 files changed, 18 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6aebfceca3eb..bcdd6606f468 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1757,6 +1757,8 @@ extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, int to_offset, int size); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +extern void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb); extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); extern __wsum skb_checksum(const struct sk_buff *skb, int offset, diff --git a/net/core/datagram.c b/net/core/datagram.c index 1c6cf3a1a4f6..4ade3011bb3c 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -224,6 +224,15 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb) consume_skb(skb); sk_mem_reclaim_partial(sk); } +EXPORT_SYMBOL(skb_free_datagram); + +void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) +{ + lock_sock(sk); + skb_free_datagram(sk, skb); + release_sock(sk); +} +EXPORT_SYMBOL(skb_free_datagram_locked); /** * skb_kill_datagram - Free a datagram skbuff forcibly @@ -752,5 +761,4 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, EXPORT_SYMBOL(datagram_poll); EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec); EXPORT_SYMBOL(skb_copy_datagram_iovec); -EXPORT_SYMBOL(skb_free_datagram); EXPORT_SYMBOL(skb_recv_datagram); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d0d436d6216c..0fa9f70e4b19 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -999,9 +999,7 @@ try_again: err = ulen; out_free: - lock_sock(sk); - skb_free_datagram(sk, skb); - release_sock(sk); + skb_free_datagram_locked(sk, skb); out: return err; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3a60f12b34ed..cf538ed5ef6a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -288,9 +288,7 @@ try_again: err = ulen; out_free: - lock_sock(sk); - skb_free_datagram(sk, skb); - release_sock(sk); + skb_free_datagram_locked(sk, skb); out: return err; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ccc5e83cae5d..1c246a4f491e 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -111,7 +111,7 @@ static void svc_release_skb(struct svc_rqst *rqstp) rqstp->rq_xprt_ctxt = NULL; dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); - skb_free_datagram(svsk->sk_sk, skb); + skb_free_datagram_locked(svsk->sk_sk, skb); } } @@ -578,7 +578,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) "svc: received unknown control message %d/%d; " "dropping RPC reply datagram\n", cmh->cmsg_level, cmh->cmsg_type); - skb_free_datagram(svsk->sk_sk, skb); + skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } @@ -588,18 +588,18 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { local_bh_enable(); /* checksum error */ - skb_free_datagram(svsk->sk_sk, skb); + skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } local_bh_enable(); - skb_free_datagram(svsk->sk_sk, skb); + skb_free_datagram_locked(svsk->sk_sk, skb); } else { /* we can use it in-place */ rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); rqstp->rq_arg.head[0].iov_len = len; if (skb_checksum_complete(skb)) { - skb_free_datagram(svsk->sk_sk, skb); + skb_free_datagram_locked(svsk->sk_sk, 
skb); return 0; } rqstp->rq_xprt_ctxt = skb; -- cgit v1.2.3 From 3e2796a90cf349527e50b3bc4d0b2f4019b1ce7a Mon Sep 17 00:00:00 2001 From: Eric Van Hensbergen Date: Mon, 2 Nov 2009 08:39:28 -0600 Subject: 9p: fix readdir corner cases The patch below also addresses a couple of other corner cases in readdir seen with a large (e.g. 64k) msize. I'm not sure what people think of my co-opting of fid->aux here. I'd be happy to rework if there's a better way. When the size of the user supplied buffer passed to readdir is smaller than the data returned in one go by the 9P read request, v9fs_dir_readdir() currently discards extra data so that, on the next call, a 9P read request will be issued with offset < previous offset + bytes returned, which violates the constraint described in paragraph 3 of the read(5) description. This patch preserves the leftover data in fid->aux for use in the next call. Signed-off-by: Jim Garlick Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_dir.c | 94 +++++++++++++++++++++++++++++++++++-------------- include/net/9p/client.h | 7 ++-- net/9p/client.c | 5 ++- 3 files changed, 72 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index cae53d405f21..15cce53bf61e 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -39,6 +39,24 @@ #include "v9fs_vfs.h" #include "fid.h" +/** + * struct p9_rdir - readdir accounting + * @mutex: mutex protecting readdir + * @head: start offset of current dirread buffer + * @tail: end offset of current dirread buffer + * @buf: dirread buffer + * + * private structure for keeping track of readdir + * allocated on demand + */ + +struct p9_rdir { + struct mutex mutex; + int head; + int tail; + uint8_t *buf; +}; + /** * dt_type - return file type * @mistat: mistat structure @@ -70,57 +88,79 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) { int over; struct p9_wstat st; - int err; + int err = 0; struct p9_fid *fid; int buflen; - char *statbuf; - int n, i = 0; + int reclen = 0; + struct p9_rdir *rdir; P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); fid = filp->private_data; buflen = fid->clnt->msize - P9_IOHDRSZ; - statbuf = kmalloc(buflen, GFP_KERNEL); - if (!statbuf) - return -ENOMEM; - - while (1) { - err = v9fs_file_readn(filp, statbuf, NULL, buflen, - fid->rdir_fpos); - if (err <= 0) - break; - - i = 0; - n = err; - while (i < n) { - err = p9stat_read(statbuf + i, buflen-i, &st, - fid->clnt->dotu); + + /* allocate rdir on demand */ + if (!fid->rdir) { + rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); + + if (rdir == NULL) { + err = -ENOMEM; + goto exit; + } + spin_lock(&filp->f_dentry->d_lock); + if (!fid->rdir) { + rdir->buf = (uint8_t *)rdir + sizeof(struct p9_rdir); + mutex_init(&rdir->mutex); + rdir->head = rdir->tail = 0; + fid->rdir = (void *) rdir; + rdir = NULL; + } + spin_unlock(&filp->f_dentry->d_lock); + kfree(rdir); + } + rdir = (struct p9_rdir *) fid->rdir; + + err = mutex_lock_interruptible(&rdir->mutex); + while (err == 0) { + if (rdir->tail == rdir->head) { + err = v9fs_file_readn(filp, rdir->buf, NULL, + buflen, filp->f_pos); + if (err <= 0) + goto unlock_and_exit; + + rdir->head = 0; + rdir->tail = err; + } + + while (rdir->head < rdir->tail) { + err = p9stat_read(rdir->buf + rdir->head, + buflen - rdir->head, &st, + fid->clnt->dotu); if (err) { P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); err = -EIO; p9stat_free(&st); - goto free_and_exit; + goto unlock_and_exit; } - - i += st.size+2; - fid->rdir_fpos +=
st.size+2; + reclen = st.size+2; over = filldir(dirent, st.name, strlen(st.name), filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st)); - filp->f_pos += st.size+2; - p9stat_free(&st); if (over) { err = 0; - goto free_and_exit; + goto unlock_and_exit; } + rdir->head += reclen; + filp->f_pos += reclen; } } -free_and_exit: - kfree(statbuf); +unlock_and_exit: + mutex_unlock(&rdir->mutex); +exit: return err; } diff --git a/include/net/9p/client.h b/include/net/9p/client.h index e26812274b75..fb00b329f0d3 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -159,8 +159,7 @@ struct p9_client { * @qid: the &p9_qid server identifier this handle points to * @iounit: the server reported maximum transaction size for this file * @uid: the numeric uid of the local user who owns this handle - * @aux: transport specific information (unused?) - * @rdir_fpos: tracks offset of file position when reading directory contents + * @rdir: readdir accounting structure (allocated on demand) * @flist: per-client-instance fid tracking * @dlist: per-dentry fid tracking * @@ -174,9 +173,9 @@ struct p9_fid { struct p9_qid qid; u32 iounit; uid_t uid; - void *aux; - int rdir_fpos; + void *rdir; + struct list_head flist; struct list_head dlist; /* list of all fids attached to a dentry */ }; diff --git a/net/9p/client.c b/net/9p/client.c index 5bf5f227dbe0..8af95b2dddd6 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -582,11 +582,9 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt) memset(&fid->qid, 0, sizeof(struct p9_qid)); fid->mode = -1; - fid->rdir_fpos = 0; fid->uid = current_fsuid(); fid->clnt = clnt; - fid->aux = NULL; - + fid->rdir = NULL; spin_lock_irqsave(&clnt->lock, flags); list_add(&fid->flist, &clnt->fidlist); spin_unlock_irqrestore(&clnt->lock, flags); @@ -609,6 +607,7 @@ static void p9_fid_destroy(struct p9_fid *fid) spin_lock_irqsave(&clnt->lock, flags); list_del(&fid->flist); spin_unlock_irqrestore(&clnt->lock, flags); + kfree(fid->rdir); kfree(fid); } -- cgit v1.2.3 From 9905d1b411946fb3fb228e8c6529fd94afda8a92 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 3 Nov 2009 10:54:58 +0100 Subject: PM / yenta: Split resume into early and late parts (rev. 4) Commit 0c570cdeb8fdfcb354a3e9cd81bfc6a09c19de0c (PM / yenta: Fix cardbus suspend/resume regression) caused resume to fail on systems with two CardBus bridges. While the exact nature of the failure is not known at the moment, it can be worked around by splitting the yenta resume into an early part, executed during the early phase of resume, that will only resume the socket and power it up if there was a card in it during suspend, and a late part, executed during "regular" resume, that will carry out all of the remaining yenta resume operations. Fixes http://bugzilla.kernel.org/show_bug.cgi?id=14334, which is a listed regression from 2.6.31. Signed-off-by: Rafael J. Wysocki Acked-by: Dominik Brodowski Reported-by: Stephen J. 
Gowdy Tested-by: Jose Marino --- drivers/pcmcia/cs.c | 69 +++++++++++++++++++++++++++---------------- drivers/pcmcia/yenta_socket.c | 12 +++++++- include/pcmcia/ss.h | 4 +++ 3 files changed, 58 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 934d4bee39a0..698d75cda084 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -98,10 +98,13 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem); * These functions check for the appropriate struct pcmcia_soket arrays, * and pass them to the low-level functions pcmcia_{suspend,resume}_socket */ +static int socket_early_resume(struct pcmcia_socket *skt); +static int socket_late_resume(struct pcmcia_socket *skt); static int socket_resume(struct pcmcia_socket *skt); static int socket_suspend(struct pcmcia_socket *skt); -int pcmcia_socket_dev_suspend(struct device *dev) +static void pcmcia_socket_dev_run(struct device *dev, + int (*cb)(struct pcmcia_socket *)) { struct pcmcia_socket *socket; @@ -110,29 +113,34 @@ int pcmcia_socket_dev_suspend(struct device *dev) if (socket->dev.parent != dev) continue; mutex_lock(&socket->skt_mutex); - socket_suspend(socket); + cb(socket); mutex_unlock(&socket->skt_mutex); } up_read(&pcmcia_socket_list_rwsem); +} +int pcmcia_socket_dev_suspend(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_suspend); return 0; } EXPORT_SYMBOL(pcmcia_socket_dev_suspend); -int pcmcia_socket_dev_resume(struct device *dev) +void pcmcia_socket_dev_early_resume(struct device *dev) { - struct pcmcia_socket *socket; + pcmcia_socket_dev_run(dev, socket_early_resume); +} +EXPORT_SYMBOL(pcmcia_socket_dev_early_resume); - down_read(&pcmcia_socket_list_rwsem); - list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { - if (socket->dev.parent != dev) - continue; - mutex_lock(&socket->skt_mutex); - socket_resume(socket); - mutex_unlock(&socket->skt_mutex); - } - up_read(&pcmcia_socket_list_rwsem); +void pcmcia_socket_dev_late_resume(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_late_resume); +} +EXPORT_SYMBOL(pcmcia_socket_dev_late_resume); +int pcmcia_socket_dev_resume(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_resume); return 0; } EXPORT_SYMBOL(pcmcia_socket_dev_resume); @@ -546,29 +554,24 @@ static int socket_suspend(struct pcmcia_socket *skt) return 0; } -/* - * Resume a socket. If a card is present, verify its CIS against - * our cached copy. If they are different, the card has been - * replaced, and we need to tell the drivers. - */ -static int socket_resume(struct pcmcia_socket *skt) +static int socket_early_resume(struct pcmcia_socket *skt) { - int ret; - - if (!(skt->state & SOCKET_SUSPEND)) - return -EBUSY; - skt->socket = dead_socket; skt->ops->init(skt); skt->ops->set_socket(skt, &skt->socket); + if (skt->state & SOCKET_PRESENT) + skt->resume_status = socket_setup(skt, resume_delay); + return 0; +} +static int socket_late_resume(struct pcmcia_socket *skt) +{ if (!(skt->state & SOCKET_PRESENT)) { skt->state &= ~SOCKET_SUSPEND; return socket_insert(skt); } - ret = socket_setup(skt, resume_delay); - if (ret == 0) { + if (skt->resume_status == 0) { /* * FIXME: need a better check here for cardbus cards. */ @@ -596,6 +599,20 @@ static int socket_resume(struct pcmcia_socket *skt) return 0; } +/* + * Resume a socket. If a card is present, verify its CIS against + * our cached copy. If they are different, the card has been + * replaced, and we need to tell the drivers. 
+ */ +static int socket_resume(struct pcmcia_socket *skt) +{ + if (!(skt->state & SOCKET_SUSPEND)) + return -EBUSY; + + socket_early_resume(skt); + return socket_late_resume(skt); +} + static void socket_remove(struct pcmcia_socket *skt) { dev_printk(KERN_NOTICE, &skt->dev, diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index abe0e44c6e9e..8be4cc447a17 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1275,16 +1275,26 @@ static int yenta_dev_resume_noirq(struct device *dev) if (socket->type && socket->type->restore_state) socket->type->restore_state(socket); - return pcmcia_socket_dev_resume(dev); + pcmcia_socket_dev_early_resume(dev); + return 0; +} + +static int yenta_dev_resume(struct device *dev) +{ + pcmcia_socket_dev_late_resume(dev); + return 0; } static struct dev_pm_ops yenta_pm_ops = { .suspend_noirq = yenta_dev_suspend_noirq, .resume_noirq = yenta_dev_resume_noirq, + .resume = yenta_dev_resume, .freeze_noirq = yenta_dev_suspend_noirq, .thaw_noirq = yenta_dev_resume_noirq, + .thaw = yenta_dev_resume, .poweroff_noirq = yenta_dev_suspend_noirq, .restore_noirq = yenta_dev_resume_noirq, + .restore = yenta_dev_resume, }; #define YENTA_PM_OPS (&yenta_pm_ops) diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index d696a692d94a..e0f6feb8588c 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -262,6 +262,8 @@ struct pcmcia_socket { struct device dev; /* data internal to the socket driver */ void *driver_data; + /* status of the card during resume from a system sleep state */ + int resume_status; }; @@ -280,6 +282,8 @@ extern struct pccard_resource_ops pccard_nonstatic_ops; /* socket drivers are expected to use these callbacks in their .drv struct */ extern int pcmcia_socket_dev_suspend(struct device *dev); +extern void pcmcia_socket_dev_early_resume(struct device *dev); +extern void pcmcia_socket_dev_late_resume(struct device *dev); extern int pcmcia_socket_dev_resume(struct device *dev); /* socket drivers use this callback in their IRQ handler */ -- cgit v1.2.3 From f9dd09c7f7199685601d75882447a6598be8a3e0 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Fri, 6 Nov 2009 00:43:42 -0800 Subject: netfilter: nf_nat: fix NAT issue in 2.6.30.4+ Vitezslav Samel discovered that since 2.6.30.4+ active FTP cannot work over NAT. The "cause" of the problem was a fix of unacknowledged data detection with NAT (commit a3a9f79e361e864f0e9d75ebe2a0cb43d17c4272). However, actually, that fix uncovered a long-standing bug in TCP conntrack: when NAT was enabled, we simply updated the max of the right edge of the segments we have seen (td_end) by the offset NAT produced when changing IP/port in the data. However, we did not update the other parameter (td_maxend) which is affected by the NAT offset. Thus that could drift away from the correct value and ended up breaking active FTP. The patch below fixes the issue by *not* updating the conntrack parameters from NAT, but instead taking into account the NAT offsets in conntrack in a consistent way. (Updating from NAT would be harder and more expensive because it'd need to re-calculate parameters we already calculated in conntrack.) Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy Signed-off-by: David S.
Miller --- include/net/netfilter/nf_conntrack.h | 8 ++--- include/net/netfilter/nf_nat_helper.h | 4 +++ net/ipv4/netfilter/nf_nat_core.c | 3 ++ net/ipv4/netfilter/nf_nat_helper.c | 34 ++++++++++++------ net/netfilter/nf_conntrack_core.c | 8 +++++ net/netfilter/nf_conntrack_proto_tcp.c | 64 ++++++++++++++-------------------- 6 files changed, 67 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cbdd6284996d..5cf7270e3ffc 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -255,11 +255,9 @@ static inline bool nf_ct_kill(struct nf_conn *ct) } /* These are for NAT. Icky. */ -/* Update TCP window tracking data when NAT mangles the packet */ -extern void nf_conntrack_tcp_update(const struct sk_buff *skb, - unsigned int dataoff, - struct nf_conn *ct, int dir, - s16 offset); +extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); /* Fake conntrack entry for untracked connections */ extern struct nf_conn nf_conntrack_untracked; diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 237a961f40e1..4222220920a5 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -32,4 +32,8 @@ extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, * to port ct->master->saved_proto. */ extern void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); + +extern s16 nf_nat_get_offset(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); #endif diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 68afc6ecd343..fe1a64479dd0 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -750,6 +750,8 @@ static int __init nf_nat_init(void) BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, nfnetlink_parse_nat_setup); + BUG_ON(nf_ct_nat_offset != NULL); + rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset); return 0; cleanup_extend: @@ -764,6 +766,7 @@ static void __exit nf_nat_cleanup(void) nf_ct_extend_unregister(&nat_extend); rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL); rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL); + rcu_assign_pointer(nf_ct_nat_offset, NULL); synchronize_net(); } diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 09172a65d9b6..f9520fa3aba9 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c @@ -73,6 +73,28 @@ adjust_tcp_sequence(u32 seq, DUMP_OFFSET(this_way); } +/* Get the offset value, for conntrack */ +s16 nf_nat_get_offset(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq) +{ + struct nf_conn_nat *nat = nfct_nat(ct); + struct nf_nat_seq *this_way; + s16 offset; + + if (!nat) + return 0; + + this_way = &nat->seq[dir]; + spin_lock_bh(&nf_nat_seqofs_lock); + offset = after(seq, this_way->correction_pos) + ? this_way->offset_after : this_way->offset_before; + spin_unlock_bh(&nf_nat_seqofs_lock); + + return offset; +} +EXPORT_SYMBOL_GPL(nf_nat_get_offset); + /* Frobs data inside this packet, which is linear. 
*/ static void mangle_contents(struct sk_buff *skb, unsigned int dataoff, @@ -189,11 +211,6 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, adjust_tcp_sequence(ntohl(tcph->seq), (int)rep_len - (int)match_len, ct, ctinfo); - /* Tell TCP window tracking about seq change */ - nf_conntrack_tcp_update(skb, ip_hdrlen(skb), - ct, CTINFO2DIR(ctinfo), - (int)rep_len - (int)match_len); - nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); } return 1; @@ -415,12 +432,7 @@ nf_nat_seq_adjust(struct sk_buff *skb, tcph->seq = newseq; tcph->ack_seq = newack; - if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo)) - return 0; - - nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff); - - return 1; + return nf_nat_sack_adjust(skb, tcph, ct, ctinfo); } /* Setup NAT on this expected conntrack so it follows master. */ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 7c9ec3dee96e..0cdfb388a191 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1350,6 +1350,11 @@ err_stat: return ret; } +s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq); +EXPORT_SYMBOL_GPL(nf_ct_nat_offset); + int nf_conntrack_init(struct net *net) { int ret; @@ -1367,6 +1372,9 @@ int nf_conntrack_init(struct net *net) /* For use by REJECT target */ rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); + + /* Howto get NAT offsets */ + rcu_assign_pointer(nf_ct_nat_offset, NULL); } return 0; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 97a82ba75376..ba2b76937283 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -492,6 +492,21 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, } } +#ifdef CONFIG_NF_NAT_NEEDED +static inline s16 nat_offset(const struct nf_conn *ct, + enum ip_conntrack_dir dir, + u32 seq) +{ + typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset); + + return get_offset != NULL ? get_offset(ct, dir, seq) : 0; +} +#define NAT_OFFSET(pf, ct, dir, seq) \ + (pf == NFPROTO_IPV4 ? 
nat_offset(ct, dir, seq) : 0) +#else +#define NAT_OFFSET(pf, ct, dir, seq) 0 +#endif + static bool tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp *state, enum ip_conntrack_dir dir, @@ -506,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp_state *receiver = &state->seen[!dir]; const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; __u32 seq, ack, sack, end, win, swin; + s16 receiver_offset; bool res; /* @@ -519,11 +535,16 @@ static bool tcp_in_window(const struct nf_conn *ct, if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) tcp_sack(skb, dataoff, tcph, &sack); + /* Take into account NAT sequence number mangling */ + receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1); + ack -= receiver_offset; + sack -= receiver_offset; + pr_debug("tcp_in_window: START\n"); pr_debug("tcp_in_window: "); nf_ct_dump_tuple(tuple); - pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n", - seq, ack, sack, win, end); + pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n", + seq, ack, receiver_offset, sack, receiver_offset, win, end); pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " "receiver end=%u maxend=%u maxwin=%u scale=%i\n", sender->td_end, sender->td_maxend, sender->td_maxwin, @@ -613,8 +634,8 @@ static bool tcp_in_window(const struct nf_conn *ct, pr_debug("tcp_in_window: "); nf_ct_dump_tuple(tuple); - pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n", - seq, ack, sack, win, end); + pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n", + seq, ack, receiver_offset, sack, receiver_offset, win, end); pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " "receiver end=%u maxend=%u maxwin=%u scale=%i\n", sender->td_end, sender->td_maxend, sender->td_maxwin, @@ -700,7 +721,7 @@ static bool tcp_in_window(const struct nf_conn *ct, before(seq, sender->td_maxend + 1) ? after(end, sender->td_end - receiver->td_maxwin - 1) ? before(sack, receiver->td_end + 1) ? - after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG" + after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" : "ACK is under the lower bound (possible overly delayed ACK)" : "ACK is over the upper bound (ACKed data not seen yet)" : "SEQ is under the lower bound (already ACKed data retransmitted)" @@ -715,39 +736,6 @@ static bool tcp_in_window(const struct nf_conn *ct, return res; } -#ifdef CONFIG_NF_NAT_NEEDED -/* Update sender->td_end after NAT successfully mangled the packet */ -/* Caller must linearize skb at tcp header. */ -void nf_conntrack_tcp_update(const struct sk_buff *skb, - unsigned int dataoff, - struct nf_conn *ct, int dir, - s16 offset) -{ - const struct tcphdr *tcph = (const void *)skb->data + dataoff; - const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir]; - const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir]; - __u32 end; - - end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph); - - spin_lock_bh(&ct->lock); - /* - * We have to worry for the ack in the reply packet only... 
- */ - if (ct->proto.tcp.seen[dir].td_end + offset == end) - ct->proto.tcp.seen[dir].td_end = end; - ct->proto.tcp.last_end = end; - spin_unlock_bh(&ct->lock); - pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); -} -EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update); -#endif - #define TH_FIN 0x01 #define TH_SYN 0x02 #define TH_RST 0x04 -- cgit v1.2.3 From b2b465e9280b739a528435d2916c0a5c1b4eb100 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Thu, 29 Oct 2009 22:32:26 -0400 Subject: [SCSI] Fix incorrect reporting of host protection capabilities The advent of DIF Type 2 devices exposed some missing break statements in the protection mask switch constructs. However, rewriting the code to use an index into a small static array seemed like a more elegant solution. Signed-off-by: Martin K. Petersen Signed-off-by: James Bottomley --- include/scsi/scsi_host.h | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 6e728b176904..47941fc5aba7 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -797,30 +797,23 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) { - switch (target_type) { - case 1: - if (shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION) - return target_type; - case 2: - if (shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION) - return target_type; - case 3: - if (shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION) - return target_type; - } + static unsigned char cap[] = { 0, + SHOST_DIF_TYPE1_PROTECTION, + SHOST_DIF_TYPE2_PROTECTION, + SHOST_DIF_TYPE3_PROTECTION }; - return 0; + return shost->prot_capabilities & cap[target_type] ? target_type : 0; } static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) { #if defined(CONFIG_BLK_DEV_INTEGRITY) - switch (target_type) { - case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION; - case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION; - case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION; - case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION; - } + static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION, + SHOST_DIX_TYPE1_PROTECTION, + SHOST_DIX_TYPE2_PROTECTION, + SHOST_DIX_TYPE3_PROTECTION }; + + return shost->prot_capabilities & cap[target_type]; #endif return 0; } -- cgit v1.2.3 From 3806e94b0148350c72f9a3214274026b6ca03f49 Mon Sep 17 00:00:00 2001 From: Crane Cai Date: Sat, 7 Nov 2009 13:10:46 +0100 Subject: i2c-piix4: Modify code name SB900 to Hudson-2 Change SB900 to its formal code name Hudson-2. 
Signed-off-by: Crane Cai Signed-off-by: Jean Delvare --- Documentation/i2c/busses/i2c-piix4 | 2 +- drivers/i2c/busses/Kconfig | 2 +- drivers/i2c/busses/i2c-piix4.c | 8 ++++---- include/linux/pci_ids.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4 index c5b37c570554..ac540c71c7eb 100644 --- a/Documentation/i2c/busses/i2c-piix4 +++ b/Documentation/i2c/busses/i2c-piix4 @@ -8,7 +8,7 @@ Supported adapters: Datasheet: Only available via NDA from ServerWorks * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges Datasheet: Not publicly available - * AMD SB900 + * AMD Hudson-2 Datasheet: Not publicly available * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge Datasheet: Publicly available at the SMSC website http://www.smsc.com diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 737335ff2b21..e8fe7f169e25 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -128,7 +128,7 @@ config I2C_PIIX4 ATI SB600 ATI SB700 ATI SB800 - AMD SB900 + AMD Hudson-2 Serverworks OSB4 Serverworks CSB5 Serverworks CSB6 diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index d26a972aacaa..1e245e9cad31 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -22,7 +22,7 @@ Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700, SB800 - AMD SB900 + AMD Hudson-2 SMSC Victory66 Note: we assume there can only be one device, with one SMBus interface. @@ -233,9 +233,9 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev, unsigned short smba_idx = 0xcd6; u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c; - /* SB800 SMBus does not support forcing address */ + /* SB800 and later SMBus does not support forcing address */ if (force || force_addr) { - dev_err(&PIIX4_dev->dev, "SB800 SMBus does not support " + dev_err(&PIIX4_dev->dev, "SMBus does not support " "forcing address!\n"); return -EINVAL; } @@ -480,7 +480,7 @@ static struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b0f0f3851cd4..9ceb392cb984 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -543,7 +543,7 @@ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 -#define PCI_DEVICE_ID_AMD_SB900_SMBUS 0x780b +#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 -- cgit v1.2.3 From afa08974fe80c198b8650f73ed8ab59135ca10d0 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Sat, 7 Nov 2009 13:10:46 +0100 Subject: i2c: Add an interface to lock/unlock an I2C bus segment Some drivers need to be able to prevent access to an I2C bus segment for a specific period of time. 
Add an interface for them to do so without twiddling with i2c-core internals. Signed-off-by: Jean Delvare Acked-by: Ben Hutchings --- drivers/net/sfc/sfe4001.c | 4 ++-- include/linux/i2c.h | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index cee00ad49b57..49eb91b5f50c 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c @@ -188,7 +188,7 @@ static int sfn4111t_reset(struct efx_nic *efx) efx_oword_t reg; /* GPIO 3 and the GPIO register are shared with I2C, so block that */ - mutex_lock(&efx->i2c_adap.bus_lock); + i2c_lock_adapter(&efx->i2c_adap); /* Pull RST_N (GPIO 2) low then let it up again, setting the * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the @@ -204,7 +204,7 @@ static int sfn4111t_reset(struct efx_nic *efx) falcon_write(efx, &reg, GPIO_CTL_REG_KER); msleep(1); - mutex_unlock(&efx->i2c_adap.bus_lock); + i2c_unlock_adapter(&efx->i2c_adap); ssleep(1); return 0; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 57d41b0abce2..7b40cda57a70 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -361,6 +361,24 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) dev_set_drvdata(&dev->dev, data); } +/** + * i2c_lock_adapter - Prevent access to an I2C bus segment + * @adapter: Target I2C bus segment + */ +static inline void i2c_lock_adapter(struct i2c_adapter *adapter) +{ + mutex_lock(&adapter->bus_lock); +} + +/** + * i2c_unlock_adapter - Reauthorize access to an I2C bus segment + * @adapter: Target I2C bus segment + */ +static inline void i2c_unlock_adapter(struct i2c_adapter *adapter) +{ + mutex_unlock(&adapter->bus_lock); +} + /*flags for the client struct: */ #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ -- cgit v1.2.3 From bf3204cbff7d2606e758afb0994e8da6ae1c6c26 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 6 Nov 2009 21:39:07 -0800 Subject: Input: fix locking in memoryless force-feedback devices Now that the input core acquires the dev->event_lock spinlock and disables interrupts when propagating input events, using spin_lock_bh() in the ff-memless driver is not allowed. Actually, the timer_lock itself is not needed anymore; we should simply use dev->event_lock as well. Also do a small cleanup in force-feedback core.
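As an aside, the locking pattern this change moves to can be sketched as follows (hypothetical driver code written for this note, not part of the patch; the real equivalent is ml_effect_timer() in the diff below):

static void example_effect_timer(unsigned long data)
{
	struct input_dev *dev = (struct input_dev *)data;
	unsigned long flags;

	/*
	 * input_event() may hold dev->event_lock with interrupts
	 * disabled, so any other path touching effect state must take
	 * the same lock IRQ-safely; spin_lock_bh() is no longer safe.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	/* ... update and play back effect states here ... */
	spin_unlock_irqrestore(&dev->event_lock, flags);
}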
Reported-by: kerneloops.org Reported-by: http://www.kerneloops.org/searchweek.php?search=ml_ff_set_gain Reported-by: Arjan van de Ven Signed-off-by: Dmitry Torokhov --- drivers/input/ff-core.c | 20 +++++++++++--------- drivers/input/ff-memless.c | 26 +++++++++++--------------- include/linux/input.h | 4 ++++ 3 files changed, 26 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 72c63e5dd630..38df81fcdc3a 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -337,16 +337,16 @@ int input_ff_create(struct input_dev *dev, int max_effects) dev->ff = ff; dev->flush = flush_effects; dev->event = input_ff_event; - set_bit(EV_FF, dev->evbit); + __set_bit(EV_FF, dev->evbit); /* Copy "true" bits into ff device bitmap */ for (i = 0; i <= FF_MAX; i++) if (test_bit(i, dev->ffbit)) - set_bit(i, ff->ffbit); + __set_bit(i, ff->ffbit); /* we can emulate RUMBLE with periodic effects */ if (test_bit(FF_PERIODIC, ff->ffbit)) - set_bit(FF_RUMBLE, dev->ffbit); + __set_bit(FF_RUMBLE, dev->ffbit); return 0; } @@ -362,12 +362,14 @@ EXPORT_SYMBOL_GPL(input_ff_create); */ void input_ff_destroy(struct input_dev *dev) { - clear_bit(EV_FF, dev->evbit); - if (dev->ff) { - if (dev->ff->destroy) - dev->ff->destroy(dev->ff); - kfree(dev->ff->private); - kfree(dev->ff); + struct ff_device *ff = dev->ff; + + __clear_bit(EV_FF, dev->evbit); + if (ff) { + if (ff->destroy) + ff->destroy(ff); + kfree(ff->private); + kfree(ff); dev->ff = NULL; } } diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c index 2d1415e16834..b483b2995fa9 100644 --- a/drivers/input/ff-memless.c +++ b/drivers/input/ff-memless.c @@ -61,7 +61,6 @@ struct ml_device { struct ml_effect_state states[FF_MEMLESS_EFFECTS]; int gain; struct timer_list timer; - spinlock_t timer_lock; struct input_dev *dev; int (*play_effect)(struct input_dev *dev, void *data, @@ -368,38 +367,38 @@ static void ml_effect_timer(unsigned long timer_data) { struct input_dev *dev = (struct input_dev *)timer_data; struct ml_device *ml = dev->ff->private; + unsigned long flags; debug("timer: updating effects"); - spin_lock(&ml->timer_lock); + spin_lock_irqsave(&dev->event_lock, flags); ml_play_effects(ml); - spin_unlock(&ml->timer_lock); + spin_unlock_irqrestore(&dev->event_lock, flags); } +/* + * Sets requested gain for FF effects. Called with dev->event_lock held. + */ static void ml_ff_set_gain(struct input_dev *dev, u16 gain) { struct ml_device *ml = dev->ff->private; int i; - spin_lock_bh(&ml->timer_lock); - ml->gain = gain; for (i = 0; i < FF_MEMLESS_EFFECTS; i++) __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); ml_play_effects(ml); - - spin_unlock_bh(&ml->timer_lock); } +/* + * Start/stop specified FF effect. Called with dev->event_lock held. 
+ */ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect_id]; - unsigned long flags; - - spin_lock_irqsave(&ml->timer_lock, flags); if (value > 0) { debug("initiated play"); @@ -425,8 +424,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) ml_play_effects(ml); } - spin_unlock_irqrestore(&ml->timer_lock, flags); - return 0; } @@ -436,7 +433,7 @@ static int ml_ff_upload(struct input_dev *dev, struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect->id]; - spin_lock_bh(&ml->timer_lock); + spin_lock_irq(&dev->event_lock); if (test_bit(FF_EFFECT_STARTED, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); @@ -448,7 +445,7 @@ static int ml_ff_upload(struct input_dev *dev, ml_schedule_timer(ml); } - spin_unlock_bh(&ml->timer_lock); + spin_unlock_irq(&dev->event_lock); return 0; } @@ -482,7 +479,6 @@ int input_ff_create_memless(struct input_dev *dev, void *data, ml->private = data; ml->play_effect = play_effect; ml->gain = 0xffff; - spin_lock_init(&ml->timer_lock); setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); set_bit(FF_GAIN, dev->ffbit); diff --git a/include/linux/input.h b/include/linux/input.h index 0ccfc30cd40f..c2b1a7d244d9 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1377,6 +1377,10 @@ extern struct class input_class; * methods; erase() is optional. set_gain() and set_autocenter() need * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER * bits. + * + * Note that playback(), set_gain() and set_autocenter() are called with + * dev->event_lock spinlock held and interrupts off and thus may not + * sleep. */ struct ff_device { int (*upload)(struct input_dev *dev, struct ff_effect *effect, -- cgit v1.2.3 From fe8bc91c4c30122b357d197117705cfd4fabaf28 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 16 Oct 2009 19:26:15 +0200 Subject: ext3: Wait for proper transaction commit on fsync We cannot rely on buffer dirty bits during fsync because pdflush can come before fsync is called and clear dirty bits without forcing a transaction commit. What we do is track which transaction last changed the inode and which transaction last changed the allocation, and force that transaction to disk on fsync. Signed-off-by: Jan Kara Reviewed-by: Aneesh Kumar K.V --- fs/ext3/fsync.c | 36 ++++++++++++++++-------------------- fs/ext3/inode.c | 32 +++++++++++++++++++++++++++++++- fs/ext3/super.c | 2 ++ include/linux/ext3_fs_i.h | 8 ++++++++ 4 files changed, 57 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c index 451d166bbe93..8209f266e9ad 100644 --- a/fs/ext3/fsync.c +++ b/fs/ext3/fsync.c @@ -46,19 +46,21 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; + struct ext3_inode_info *ei = EXT3_I(inode); + journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; int ret = 0; + tid_t commit_tid; + + if (inode->i_sb->s_flags & MS_RDONLY) + return 0; J_ASSERT(ext3_journal_current_handle() == NULL); /* - * data=writeback: + * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. - * sync_inode() will sync the metadata - * - * data=ordered: - * The caller's filemap_fdatawrite() will write the data and - * sync_inode() will write the inode if it is dirty. Then the caller's - * filemap_fdatawait() will wait on the pages.
+ * Metadata is in the journal, we wait for a proper transaction + * to commit here. * * data=journal: * filemap_fdatawrite won't do anything (the buffers are clean). @@ -73,22 +75,16 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) goto out; } - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - goto flush; + if (datasync) + commit_tid = atomic_read(&ei->i_datasync_tid); + else + commit_tid = atomic_read(&ei->i_sync_tid); - /* - * The VFS has written the file data. If the inode is unaltered - * then we need not start a commit. - */ - if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 0, /* sys_fsync did this */ - }; - ret = sync_inode(inode, &wbc); + if (log_start_commit(journal, commit_tid)) { + log_wait_commit(journal, commit_tid); goto out; } -flush: + /* * In case we didn't commit a transaction, we have to flush * disk caches manually so that data really is on persistent diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 069a163393b4..354ed3b47b30 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -699,8 +699,9 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, int err = 0; struct ext3_block_alloc_info *block_i; ext3_fsblk_t current_block; + struct ext3_inode_info *ei = EXT3_I(inode); - block_i = EXT3_I(inode)->i_block_alloc_info; + block_i = ei->i_block_alloc_info; /* * If we're splicing into a [td]indirect block (as opposed to the * inode) then we need to get write access to the [td]indirect block @@ -741,6 +742,8 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); + /* ext3_mark_inode_dirty already updated i_sync_tid */ + atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid); /* had we spliced it onto indirect block? */ if (where->bh) { @@ -2754,6 +2757,8 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino) struct ext3_inode_info *ei; struct buffer_head *bh; struct inode *inode; + journal_t *journal = EXT3_SB(sb)->s_journal; + transaction_t *transaction; long ret; int block; @@ -2831,6 +2836,30 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); + /* + * Set transaction id's of transactions that have to be committed + * to finish f[data]sync. We set them to currently running transaction + * as we cannot be sure that the inode or some of its metadata isn't + * part of the transaction - the inode could have been reclaimed and + * now it is reread from disk. 
+ */ + if (journal) { + tid_t tid; + + spin_lock(&journal->j_state_lock); + if (journal->j_running_transaction) + transaction = journal->j_running_transaction; + else + transaction = journal->j_committing_transaction; + if (transaction) + tid = transaction->t_tid; + else + tid = journal->j_commit_sequence; + spin_unlock(&journal->j_state_lock); + atomic_set(&ei->i_sync_tid, tid); + atomic_set(&ei->i_datasync_tid, tid); + } + if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { /* @@ -3015,6 +3044,7 @@ again: err = rc; ei->i_state &= ~EXT3_STATE_NEW; + atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid); out_brelse: brelse (bh); ext3_std_error(inode->i_sb, err); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 7a520a862f49..427496c4767c 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -466,6 +466,8 @@ static struct inode *ext3_alloc_inode(struct super_block *sb) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; + atomic_set(&ei->i_datasync_tid, 0); + atomic_set(&ei->i_sync_tid, 0); return &ei->vfs_inode; } diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index ca1bfe90004f..93e7428156ba 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h @@ -137,6 +137,14 @@ struct ext3_inode_info { * by other means, so we have truncate_mutex. */ struct mutex truncate_mutex; + + /* + * Transactions that contain inode's metadata needed to complete + * fsync and fdatasync, respectively. + */ + atomic_t i_sync_tid; + atomic_t i_datasync_tid; + struct inode vfs_inode; }; -- cgit v1.2.3 From f5c15d0b37ab1cd3969b8ce7828ab41c79f36f77 Mon Sep 17 00:00:00 2001 From: Krzysztof Helt Date: Wed, 11 Nov 2009 14:26:22 -0800 Subject: fb: remove fb_save_state() and fb_restore_state operations Remove the fb_save_state() and fb_restore_state() operations from the frame buffer layer. They are used only in two drivers: 1. savagefb - and cause bug #11248 2. uvesafb Usage of these operations is misunderstood in both drivers, so kill these operations, fix bug #11248, and avoid confusion in the future. Tested on a Savage 3D/MV card; the patch fixes bug #11248. The frame buffer layer uses these functions when the console switches between graphics and text mode, but these drivers save state when the frame buffer is opened (in fb_open) and restore it when it is released (in fb_release). This defeats the purpose of these operations.
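For illustration, the misuse described above has roughly this shape (hypothetical driver code written for this note; it condenses the pattern the commit removes from savagefb and uvesafb):

static int baddrv_open(struct fb_info *info, int user)
{
	baddrv_save_state(info);	/* state saved as soon as the device is opened ... */
	return 0;
}

static int baddrv_release(struct fb_info *info, int user)
{
	/*
	 * ... and restored only when it is closed, so the console's
	 * graphics/text mode switches in between never see a state
	 * snapshot taken at the right time.
	 */
	baddrv_restore_state(info);
	return 0;
}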
Addresses http://bugzilla.kernel.org/show_bug.cgi?id=11248 Signed-off-by: Krzysztof Helt Reported-by: Jochen Hein Tested-by: Jochen Hein Cc: Geert Uytterhoeven Cc: Michal Januszewski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/atafb.c | 6 ------ drivers/video/console/fbcon.c | 5 +---- drivers/video/savage/savagefb_driver.c | 18 ------------------ drivers/video/uvesafb.c | 28 ---------------------------- include/linux/fb.h | 6 ------ 5 files changed, 1 insertion(+), 62 deletions(-) (limited to 'include') diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 8cd279be74e5..37624f74e88b 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c @@ -329,12 +329,6 @@ extern unsigned char fontdata_8x16[]; * * * perform fb specific mmap * * int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); - * - * * save current hardware state * - * void (*fb_save_state)(struct fb_info *info); - * - * * restore saved state * - * void (*fb_restore_state)(struct fb_info *info); * } ; */ diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 5a686cea23f4..3681c6a88212 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -2311,14 +2311,11 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) ops->graphics = 1; if (!blank) { - if (info->fbops->fb_save_state) - info->fbops->fb_save_state(info); var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; fb_set_var(info, &var); ops->graphics = 0; ops->var = info->var; - } else if (info->fbops->fb_restore_state) - info->fbops->fb_restore_state(info); + } } if (!fbcon_is_inactive(vc, info)) { diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c index 37b135d5d12e..0857b3b8c495 100644 --- a/drivers/video/savage/savagefb_driver.c +++ b/drivers/video/savage/savagefb_driver.c @@ -1606,22 +1606,6 @@ static int savagefb_blank(int blank, struct fb_info *info) return (blank == FB_BLANK_NORMAL) ? 
1 : 0; } -static void savagefb_save_state(struct fb_info *info) -{ - struct savagefb_par *par = info->par; - - savage_get_default_par(par, &par->save); -} - -static void savagefb_restore_state(struct fb_info *info) -{ - struct savagefb_par *par = info->par; - - savagefb_blank(FB_BLANK_POWERDOWN, info); - savage_set_default_par(par, &par->save); - savagefb_blank(FB_BLANK_UNBLANK, info); -} - static int savagefb_open(struct fb_info *info, int user) { struct savagefb_par *par = info->par; @@ -1667,8 +1651,6 @@ static struct fb_ops savagefb_ops = { .fb_setcolreg = savagefb_setcolreg, .fb_pan_display = savagefb_pan_display, .fb_blank = savagefb_blank, - .fb_save_state = savagefb_save_state, - .fb_restore_state = savagefb_restore_state, #if defined(CONFIG_FB_SAVAGE_ACCEL) .fb_fillrect = savagefb_fillrect, .fb_copyarea = savagefb_copyarea, diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index e35232a18571..54fbb2995a5f 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -1411,23 +1411,6 @@ static int uvesafb_check_var(struct fb_var_screeninfo *var, return 0; } -static void uvesafb_save_state(struct fb_info *info) -{ - struct uvesafb_par *par = info->par; - - if (par->vbe_state_saved) - kfree(par->vbe_state_saved); - - par->vbe_state_saved = uvesafb_vbe_state_save(par); -} - -static void uvesafb_restore_state(struct fb_info *info) -{ - struct uvesafb_par *par = info->par; - - uvesafb_vbe_state_restore(par, par->vbe_state_saved); -} - static struct fb_ops uvesafb_ops = { .owner = THIS_MODULE, .fb_open = uvesafb_open, @@ -1441,8 +1424,6 @@ static struct fb_ops uvesafb_ops = { .fb_imageblit = cfb_imageblit, .fb_check_var = uvesafb_check_var, .fb_set_par = uvesafb_set_par, - .fb_save_state = uvesafb_save_state, - .fb_restore_state = uvesafb_restore_state, }; static void __devinit uvesafb_init_info(struct fb_info *info, @@ -1459,15 +1440,6 @@ static void __devinit uvesafb_init_info(struct fb_info *info, info->fix.ypanstep = par->ypan ? 1 : 0; info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0; - /* - * If we were unable to get the state buffer size, disable - * functions for saving and restoring the hardware state. - */ - if (par->vbe_state_size == 0) { - info->fbops->fb_save_state = NULL; - info->fbops->fb_restore_state = NULL; - } - /* Disable blanking if the user requested so. */ if (!blank) info->fbops->fb_blank = NULL; diff --git a/include/linux/fb.h b/include/linux/fb.h index a34bdf5a9d23..de9c722e7b90 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -669,12 +669,6 @@ struct fb_ops { /* perform fb specific mmap */ int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma); - /* save current hardware state */ - void (*fb_save_state)(struct fb_info *info); - - /* restore saved state */ - void (*fb_restore_state)(struct fb_info *info); - /* get capability given var */ void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, struct fb_var_screeninfo *var); -- cgit v1.2.3 From 71ccb83cfc487354d2647e315c8196e5872198ab Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 11 Nov 2009 14:26:27 -0800 Subject: alpha: fix F_SETOWN_EX and F_GETLK64 conflict Fix a bug in commit ba0a6c9f6fceed11c6a99e8326f0477fe383e6b5 Author: Peter Zijlstra AuthorDate: Wed Sep 23 15:57:03 2009 -0700 Commit: Linus Torvalds CommitDate: Thu Sep 24 07:21:01 2009 -0700 fcntl: add F_[SG]ETOWN_EX In asm-generic/fcntl.h, F_SETOWN_EX and F_GETLK64 both have value 12, and F_GETOWN_EX and F_SETLK64 both have value 13. Reported-by: "Joseph S. 
Myers" Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Andreas Schwab Signed-off-by: Peter Zijlstra Acked-by: Ulrich Drepper Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/fcntl.h | 4 ++-- include/asm-generic/fcntl.h | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h index e42823e954aa..73126e4dd639 100644 --- a/arch/alpha/include/asm/fcntl.h +++ b/arch/alpha/include/asm/fcntl.h @@ -26,8 +26,8 @@ #define F_GETOWN 6 /* for sockets. */ #define F_SETSIG 10 /* for sockets. */ #define F_GETSIG 11 /* for sockets. */ -#define F_SETOWN_EX 12 -#define F_GETOWN_EX 13 +#define F_SETOWN_EX 15 +#define F_GETOWN_EX 16 /* for posix fcntl() and lockf() */ #define F_RDLCK 1 diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index 0c3dd8603927..cd2d7896e34b 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -73,9 +73,18 @@ #define F_SETSIG 10 /* for sockets. */ #define F_GETSIG 11 /* for sockets. */ #endif + +#ifndef CONFIG_64BIT +#ifndef F_GETLK64 +#define F_GETLK64 12 /* using 'struct flock64' */ +#define F_SETLK64 13 +#define F_SETLKW64 14 +#endif +#endif + #ifndef F_SETOWN_EX -#define F_SETOWN_EX 12 -#define F_GETOWN_EX 13 +#define F_SETOWN_EX 15 +#define F_GETOWN_EX 16 #endif #define F_OWNER_TID 0 @@ -139,12 +148,6 @@ struct flock { #ifndef CONFIG_64BIT -#ifndef F_GETLK64 -#define F_GETLK64 12 /* using 'struct flock64' */ -#define F_SETLK64 13 -#define F_SETLKW64 14 -#endif - #ifndef HAVE_ARCH_STRUCT_FLOCK64 #ifndef __ARCH_FLOCK64_PAD #define __ARCH_FLOCK64_PAD -- cgit v1.2.3 From b87e5e2b8ed9336566100c8c796ab6dd52436881 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 11 Nov 2009 14:26:42 -0800 Subject: serial: add support for the Lava Quattro PCI quad-port 16550A card This seems to be a different model (with a different PCI ID) than the "Quatro" card that is also in the list. 
Signed-off-by: Lennert Buytenhek Cc: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/serial/8250_pci.c | 6 ++++++ include/linux/pci_ids.h | 2 ++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index b2d65b208e55..b28af13c45a1 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -3139,6 +3139,12 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_115200 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_4_460800 }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9ceb392cb984..84cf1f3b7838 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1956,6 +1956,8 @@ #define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ #define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ -- cgit v1.2.3 From 409b95aff3583c05ac7a9247fa3d8c9aa7f9cae3 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 10 Nov 2009 08:57:34 +0000 Subject: sctp: Set source addresses on the association before adding transports Recent commit 8da645e101a8c20c6073efda3c7cc74eec01b87f sctp: Get rid of an extra routing lookup when adding a transport introduced a regression in the connection setup. The behavior was different between IPv4 and IPv6. The IPv4 case ended up working because the route lookup routine returned a NULL route, which triggered another route lookup later in the output path that succeeded. In the IPv6 case, a valid route was returned for the first call, but we could not find a valid source address at the time since the source addresses were not set on the association yet. This resulted in a hung connection. The solution is to set the source addresses on the association prior to adding peers. Signed-off-by: Vlad Yasevich Signed-off-by: David S.
Miller --- include/net/sctp/structs.h | 2 +- net/sctp/associola.c | 4 +--- net/sctp/sm_statefuns.c | 15 +++++++++------ net/sctp/socket.c | 22 ++++++++++++---------- 4 files changed, 23 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6e5f0e0c7967..cd2e18778f81 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1980,7 +1980,7 @@ void sctp_assoc_set_primary(struct sctp_association *, void sctp_assoc_del_nonprimary_peers(struct sctp_association *, struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - gfp_t); + sctp_scope_t, gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 8450960df24f..7eed77a39d0d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1485,15 +1485,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, - gfp_t gfp) + sctp_scope_t scope, gfp_t gfp) { - sctp_scope_t scope; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - scope = sctp_scope(&asoc->peer.active_path->ipaddr); flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index c8fae1983dd1..d4df45022ffa 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, if (!new_asoc) goto nomem; + if (sctp_assoc_set_bind_addr_from_ep(new_asoc, + sctp_scope(sctp_source(chunk)), + GFP_ATOMIC) < 0) + goto nomem_init; + /* The call, sctp_process_init(), can fail on memory allocation. */ if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, sctp_source(chunk), @@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, len = ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); - if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) - goto nomem_init; - repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem_init; @@ -1452,6 +1454,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( if (!new_asoc) goto nomem; + if (sctp_assoc_set_bind_addr_from_ep(new_asoc, + sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0) + goto nomem; + /* In the outbound INIT ACK the endpoint MUST copy its current * Verification Tag and Peers Verification tag into a reserved * place (local tie-tag and per tie-tag) within the state cookie. @@ -1488,9 +1494,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( sizeof(sctp_chunkhdr_t); } - if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) - goto nomem; - repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c8d05758661d..bf705ba97231 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk, err = -ENOMEM; goto out_free; } + + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, + GFP_KERNEL); + if (err < 0) { + goto out_free; + } + } /* Prime the peer's transport structures. 
*/ @@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk, walk_size += af->sockaddr_len; } - err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); - if (err < 0) { - goto out_free; - } - /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ @@ -1689,6 +1691,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } asoc = new_asoc; + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); + if (err < 0) { + err = -ENOMEM; + goto out_free; + } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. @@ -1718,11 +1725,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, err = -ENOMEM; goto out_free; } - err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); - if (err < 0) { - err = -ENOMEM; - goto out_free; - } } /* ASSERT: we have a valid association at this point. */ -- cgit v1.2.3 From 1a7af63108f07637504300671a72432c34e10021 Mon Sep 17 00:00:00 2001 From: Jiro SEKIBA Date: Sun, 15 Nov 2009 13:49:44 +0900 Subject: nilfs2: deleted struct nilfs_dat_group_desc struct nilfs_dat_group_desc is not used in either kernel or user space. struct nilfs_palloc_group_desc is used instead. Signed-off-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi --- include/linux/nilfs2_fs.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include') diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 79fec6af3f9f..ce520402e840 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -424,15 +424,6 @@ struct nilfs_dat_entry { __le64 de_rsv; }; -/** - * struct nilfs_dat_group_desc - block group descriptor - * @dg_nfrees: number of free virtual block numbers in block group - */ -struct nilfs_dat_group_desc { - __le32 dg_nfrees; -}; - - /** * struct nilfs_snapshot_list - snapshot list * @ssl_next: next checkpoint number on snapshot list -- cgit v1.2.3 From e29d4363174949a7a4e46f670993d7ff43342c1c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 15 Nov 2009 22:23:47 -0800 Subject: Revert "isdn: isdn_ppp: Use SKB list facilities instead of home-grown implementation." This reverts commit 38783e671399b5405f1fd177d602c400a9577ae6. It causes kernel bugzilla #14594. Signed-off-by: David S.
Miller --- drivers/isdn/i4l/isdn_ppp.c | 352 ++++++++++++++++++++------------------------ include/linux/isdn_ppp.h | 2 +- 2 files changed, 164 insertions(+), 190 deletions(-) (limited to 'include') diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 2d14b64202a3..642d5aaf53ce 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -1535,10 +1535,8 @@ static int isdn_ppp_mp_bundle_array_init(void) int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) return -ENOMEM; - for (i = 0; i < ISDN_MAX_CHANNELS; i++) { + for( i = 0; i < ISDN_MAX_CHANNELS; i++ ) spin_lock_init(&isdn_ppp_bundle_arr[i].lock); - skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags); - } return 0; } @@ -1571,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to ) if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) return -ENOMEM; lp->next = lp->last = lp; /* nobody else in a queue */ - skb_queue_head_init(&lp->netdev->pb->frags); + lp->netdev->pb->frags = NULL; lp->netdev->pb->frames = 0; lp->netdev->pb->seq = UINT_MAX; } @@ -1583,29 +1581,28 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to ) static u32 isdn_ppp_mp_get_seq( int short_seq, struct sk_buff * skb, u32 last_seq ); -static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, - struct sk_buff *to); -static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, - struct sk_buff *from, struct sk_buff *to, - u32 lastseq); -static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb); +static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, + struct sk_buff * from, struct sk_buff * to ); +static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, + struct sk_buff * from, struct sk_buff * to ); +static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb ); static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, - struct sk_buff *skb) + struct sk_buff *skb) { - struct sk_buff *newfrag, *frag, *start, *nextf; - u32 newseq, minseq, thisseq; - isdn_mppp_stats *stats; struct ippp_struct *is; + isdn_net_local * lpq; + ippp_bundle * mp; + isdn_mppp_stats * stats; + struct sk_buff * newfrag, * frag, * start, *nextf; + u32 newseq, minseq, thisseq; unsigned long flags; - isdn_net_local *lpq; - ippp_bundle *mp; int slot; spin_lock_irqsave(&net_dev->pb->lock, flags); - mp = net_dev->pb; - stats = &mp->stats; + mp = net_dev->pb; + stats = &mp->stats; slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", @@ -1616,19 +1613,20 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, return; } is = ippp_table[slot]; - if (++mp->frames > stats->max_queue_len) + if( ++mp->frames > stats->max_queue_len ) stats->max_queue_len = mp->frames; - + if (is->debug & 0x8) isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); - newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, - skb, is->last_link_seqno); + newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, + skb, is->last_link_seqno); + /* if this packet seq # is less than last already processed one, * toss it right away, but check for sequence start case first */ - if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) { + if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) { mp->seq = newseq; /* the first 
packet: required for * rfc1990 non-compliant clients -- * prevents constant packet toss */ @@ -1638,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, spin_unlock_irqrestore(&mp->lock, flags); return; } - + /* find the minimum received sequence number over all links */ is->last_link_seqno = minseq = newseq; for (lpq = net_dev->queue;;) { @@ -1659,31 +1657,22 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * packets */ newfrag = skb; - /* Insert new fragment into the proper sequence slot. */ - skb_queue_walk(&mp->frags, frag) { - if (MP_SEQ(frag) == newseq) { - isdn_ppp_mp_free_skb(mp, newfrag); - newfrag = NULL; - break; - } - if (MP_LT(newseq, MP_SEQ(frag))) { - __skb_queue_before(&mp->frags, frag, newfrag); - newfrag = NULL; - break; - } - } - if (newfrag) - __skb_queue_tail(&mp->frags, newfrag); + /* if this new fragment is before the first one, then enqueue it now. */ + if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { + newfrag->next = frag; + mp->frags = frag = newfrag; + newfrag = NULL; + } - frag = skb_peek(&mp->frags); - start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) && - (MP_SEQ(frag) == mp->seq)) ? frag : NULL; - if (!start) - goto check_overflow; + start = MP_FLAGS(frag) & MP_BEGIN_FRAG && + MP_SEQ(frag) == mp->seq ? frag : NULL; - /* main fragment traversing loop + /* + * main fragment traversing loop * * try to accomplish several tasks: + * - insert new fragment into the proper sequence slot (once that's done + * newfrag will be set to NULL) * - reassemble any complete fragment sequence (non-null 'start' * indicates there is a continguous sequence present) * - discard any incomplete sequences that are below minseq -- due @@ -1692,46 +1681,71 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * come to complete such sequence and it should be discarded * * loop completes when we accomplished the following tasks: + * - new fragment is inserted in the proper sequence ('newfrag' is + * set to NULL) * - we hit a gap in the sequence, so no reassembly/processing is * possible ('start' would be set to NULL) * * algorithm for this code is derived from code in the book * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) */ - skb_queue_walk_safe(&mp->frags, frag, nextf) { - thisseq = MP_SEQ(frag); - - /* check for misplaced start */ - if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { - printk(KERN_WARNING"isdn_mppp(seq %d): new " - "BEGIN flag with no prior END", thisseq); - stats->seqerrs++; - stats->frame_drops++; - isdn_ppp_mp_discard(mp, start, frag); - start = frag; - } else if (MP_LE(thisseq, minseq)) { - if (MP_FLAGS(frag) & MP_BEGIN_FRAG) + while (start != NULL || newfrag != NULL) { + + thisseq = MP_SEQ(frag); + nextf = frag->next; + + /* drop any duplicate fragments */ + if (newfrag != NULL && thisseq == newseq) { + isdn_ppp_mp_free_skb(mp, newfrag); + newfrag = NULL; + } + + /* insert new fragment before next element if possible. 
*/ + if (newfrag != NULL && (nextf == NULL || + MP_LT(newseq, MP_SEQ(nextf)))) { + newfrag->next = nextf; + frag->next = nextf = newfrag; + newfrag = NULL; + } + + if (start != NULL) { + /* check for misplaced start */ + if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { + printk(KERN_WARNING"isdn_mppp(seq %d): new " + "BEGIN flag with no prior END", thisseq); + stats->seqerrs++; + stats->frame_drops++; + start = isdn_ppp_mp_discard(mp, start,frag); + nextf = frag->next; + } + } else if (MP_LE(thisseq, minseq)) { + if (MP_FLAGS(frag) & MP_BEGIN_FRAG) start = frag; - else { + else { if (MP_FLAGS(frag) & MP_END_FRAG) - stats->frame_drops++; - __skb_unlink(skb, &mp->frags); + stats->frame_drops++; + if( mp->frags == frag ) + mp->frags = nextf; isdn_ppp_mp_free_skb(mp, frag); + frag = nextf; continue; - } + } } - - /* if we have end fragment, then we have full reassembly - * sequence -- reassemble and process packet now + + /* if start is non-null and we have end fragment, then + * we have full reassembly sequence -- reassemble + * and process packet now */ - if (MP_FLAGS(frag) & MP_END_FRAG) { - minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; - /* Reassemble the packet then dispatch it */ - isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq); + if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { + minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; + /* Reassemble the packet then dispatch it */ + isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); + + start = NULL; + frag = NULL; - start = NULL; - frag = NULL; - } + mp->frags = nextf; + } /* check if need to update start pointer: if we just * reassembled the packet and sequence is contiguous @@ -1742,25 +1756,26 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * below low watermark and set start to the next frag or * clear start ptr. */ - if (nextf != (struct sk_buff *)&mp->frags && + if (nextf != NULL && ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { - /* if we just reassembled and the next one is here, - * then start another reassembly. - */ - if (frag == NULL) { + /* if we just reassembled and the next one is here, + * then start another reassembly. 
*/ + + if (frag == NULL) { if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) - start = nextf; - else { - printk(KERN_WARNING"isdn_mppp(seq %d):" - " END flag with no following " - "BEGIN", thisseq); + start = nextf; + else + { + printk(KERN_WARNING"isdn_mppp(seq %d):" + " END flag with no following " + "BEGIN", thisseq); stats->seqerrs++; } } - } else { - if (nextf != (struct sk_buff *)&mp->frags && - frag != NULL && - MP_LT(thisseq, minseq)) { + + } else { + if ( nextf != NULL && frag != NULL && + MP_LT(thisseq, minseq)) { /* we've got a break in the sequence * and we not at the end yet * and we did not just reassembled @@ -1769,39 +1784,41 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * discard all the frames below low watermark * and start over */ stats->frame_drops++; - isdn_ppp_mp_discard(mp, start, nextf); + mp->frags = isdn_ppp_mp_discard(mp,start,nextf); } /* break in the sequence, no reassembly */ - start = NULL; - } - if (!start) - break; - } - -check_overflow: + start = NULL; + } + + frag = nextf; + } /* while -- main loop */ + + if (mp->frags == NULL) + mp->frags = frag; + /* rather straighforward way to deal with (not very) possible - * queue overflow - */ + * queue overflow */ if (mp->frames > MP_MAX_QUEUE_LEN) { stats->overflows++; - skb_queue_walk_safe(&mp->frags, frag, nextf) { - if (mp->frames <= MP_MAX_QUEUE_LEN) - break; - __skb_unlink(frag, &mp->frags); - isdn_ppp_mp_free_skb(mp, frag); + while (mp->frames > MP_MAX_QUEUE_LEN) { + frag = mp->frags->next; + isdn_ppp_mp_free_skb(mp, mp->frags); + mp->frags = frag; } } spin_unlock_irqrestore(&mp->lock, flags); } -static void isdn_ppp_mp_cleanup(isdn_net_local *lp) +static void isdn_ppp_mp_cleanup( isdn_net_local * lp ) { - struct sk_buff *skb, *tmp; - - skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) { - __skb_unlink(skb, &lp->netdev->pb->frags); - isdn_ppp_mp_free_skb(lp->netdev->pb, skb); - } + struct sk_buff * frag = lp->netdev->pb->frags; + struct sk_buff * nextfrag; + while( frag ) { + nextfrag = frag->next; + isdn_ppp_mp_free_skb(lp->netdev->pb, frag); + frag = nextfrag; + } + lp->netdev->pb->frags = NULL; } static u32 isdn_ppp_mp_get_seq( int short_seq, @@ -1838,115 +1855,72 @@ static u32 isdn_ppp_mp_get_seq( int short_seq, return seq; } -static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, - struct sk_buff *to) +struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, + struct sk_buff * from, struct sk_buff * to ) { - if (from) { - struct sk_buff *skb, *tmp; - int freeing = 0; - - skb_queue_walk_safe(&mp->frags, skb, tmp) { - if (skb == to) - break; - if (skb == from) - freeing = 1; - if (!freeing) - continue; - __skb_unlink(skb, &mp->frags); - isdn_ppp_mp_free_skb(mp, skb); + if( from ) + while (from != to) { + struct sk_buff * next = from->next; + isdn_ppp_mp_free_skb(mp, from); + from = next; } - } -} - -static unsigned int calc_tot_len(struct sk_buff_head *queue, - struct sk_buff *from, struct sk_buff *to) -{ - unsigned int tot_len = 0; - struct sk_buff *skb; - int found_start = 0; - - skb_queue_walk(queue, skb) { - if (skb == from) - found_start = 1; - if (!found_start) - continue; - tot_len += skb->len - MP_HEADER_LEN; - if (skb == to) - break; - } - return tot_len; + return from; } -/* Reassemble packet using fragments in the reassembly queue from - * 'from' until 'to', inclusive. 
- */ -static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, - struct sk_buff *from, struct sk_buff *to, - u32 lastseq) +void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, + struct sk_buff * from, struct sk_buff * to ) { - ippp_bundle *mp = net_dev->pb; - unsigned int tot_len; - struct sk_buff *skb; + ippp_bundle * mp = net_dev->pb; int proto; + struct sk_buff * skb; + unsigned int tot_len; if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } - - tot_len = calc_tot_len(&mp->frags, from, to); - - if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) { - if (ippp_table[lp->ppp_slot]->debug & 0x40) + if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) { + if( ippp_table[lp->ppp_slot]->debug & 0x40 ) printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " - "len %d\n", MP_SEQ(from), from->len); + "len %d\n", MP_SEQ(from), from->len ); skb = from; skb_pull(skb, MP_HEADER_LEN); - __skb_unlink(skb, &mp->frags); mp->frames--; } else { - struct sk_buff *walk, *tmp; - int found_start = 0; + struct sk_buff * frag; + int n; - if (ippp_table[lp->ppp_slot]->debug & 0x40) - printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " - "to %d, len %d\n", MP_SEQ(from), lastseq, - tot_len); + for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++) + tot_len += frag->len - MP_HEADER_LEN; - skb = dev_alloc_skb(tot_len); - if (!skb) + if( ippp_table[lp->ppp_slot]->debug & 0x40 ) + printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " + "to %d, len %d\n", MP_SEQ(from), + (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len ); + if( (skb = dev_alloc_skb(tot_len)) == NULL ) { printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " - "of size %d\n", tot_len); - - found_start = 0; - skb_queue_walk_safe(&mp->frags, walk, tmp) { - if (walk == from) - found_start = 1; - if (!found_start) - continue; + "of size %d\n", tot_len); + isdn_ppp_mp_discard(mp, from, to); + return; + } - if (skb) { - unsigned int len = walk->len - MP_HEADER_LEN; - skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN, - skb_put(skb, len), - len); - } - __skb_unlink(walk, &mp->frags); - isdn_ppp_mp_free_skb(mp, walk); + while( from != to ) { + unsigned int len = from->len - MP_HEADER_LEN; - if (walk == to) - break; + skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, + skb_put(skb,len), + len); + frag = from->next; + isdn_ppp_mp_free_skb(mp, from); + from = frag; } } - if (!skb) - return; - proto = isdn_ppp_strip_proto(skb); isdn_ppp_push_higher(net_dev, lp, skb, proto); } -static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb) +static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb) { dev_kfree_skb(skb); mp->frames--; diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h index 4c218ee7587a..8687a7dc0632 100644 --- a/include/linux/isdn_ppp.h +++ b/include/linux/isdn_ppp.h @@ -157,7 +157,7 @@ typedef struct { typedef struct { int mp_mrru; /* unused */ - struct sk_buff_head frags; /* fragments sl list */ + struct sk_buff * frags; /* fragments sl list -- use skb->next */ long frames; /* number of frames in the frame list */ unsigned int seq; /* last processed packet seq #: any packets * with smaller seq # will be dropped -- cgit v1.2.3 From 6ad696d2cf535772dff659298ec7e7260e344595 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 17 Nov 2009 14:06:22 -0800 Subject: mm: allow memory hotplug and hibernation in the same kernel Allow memory hotplug and hibernation in the 
same kernel Memory hotplug and hibernation were exclusive in Kconfig. This is obviously a problem for distribution kernels that want to support both in the same image. After some discussions with Rafael and others, it turned out that the only problem is parallel memory hotadd or removal while a hibernation operation is in progress. It was also working for s390 before. This patch removes the Kconfig level exclusion, and simply makes the memory add / remove functions grab the pm_mutex to exclude against hibernation. Fixes a regression - old kernels didn't exclude memory hotadd and hibernation. Signed-off-by: Andi Kleen Cc: Gerald Schaefer Cc: KOSAKI Motohiro Cc: Yasunori Goto Acked-by: Rafael J. Wysocki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/suspend.h | 21 +++++++++++++++++++-- mm/Kconfig | 5 +---- mm/memory_hotplug.c | 21 +++++++++++++++++---- 3 files changed, 37 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index cd15df6c63cd..5e781d824e6d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -301,6 +301,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) #define pm_notifier(fn, pri) do { (void)(fn); } while (0) #endif /* !CONFIG_PM_SLEEP */ +extern struct mutex pm_mutex; + #ifndef CONFIG_HIBERNATION static inline void register_nosave_region(unsigned long b, unsigned long e) { @@ -308,8 +310,23 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) { } -#endif -extern struct mutex pm_mutex; +static inline void lock_system_sleep(void) {} +static inline void unlock_system_sleep(void) {} + +#else + +/* Let some subsystems like memory hotadd exclude hibernation */ + +static inline void lock_system_sleep(void) +{ + mutex_lock(&pm_mutex); +} + +static inline void unlock_system_sleep(void) +{ + mutex_unlock(&pm_mutex); +} +#endif #endif /* _LINUX_SUSPEND_H */ diff --git a/mm/Kconfig b/mm/Kconfig index fd3386242cf0..44cf6f0a3a6d 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -128,12 +128,9 @@ config SPARSEMEM_VMEMMAP config MEMORY_HOTPLUG bool "Allow for memory hot-add" depends on SPARSEMEM || X86_64_ACPI_NUMA - depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG + depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390) -comment "Memory hotplug is currently incompatible with Software Suspend" - depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390 - config MEMORY_HOTPLUG_SPARSE def_bool y depends on SPARSEMEM && MEMORY_HOTPLUG diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 380aef45c2cf..2047465cd27c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -485,14 +486,18 @@ int __ref add_memory(int nid, u64 start, u64 size) struct resource *res; int ret; + lock_system_sleep(); + res = register_memory_resource(start, size); + ret = -EEXIST; if (!res) - return -EEXIST; + goto out; if (!node_online(nid)) { pgdat = hotadd_new_pgdat(nid, start); + ret = -ENOMEM; if (!pgdat) - return -ENOMEM; + goto out; new_pgdat = 1; } @@ -515,7 +520,8 @@ int __ref add_memory(int nid, u64 start, u64 size) BUG_ON(ret); } - return ret; + goto out; + error: /* rollback pgdat allocation and others */ if (new_pgdat) @@ -523,6 +529,8 @@ error: if (res) release_memory_resource(res); +out: + unlock_system_sleep(); return ret; }
EXPORT_SYMBOL_GPL(add_memory); @@ -759,6 +767,8 @@ int offline_pages(unsigned long start_pfn, if (!test_pages_in_a_zone(start_pfn, end_pfn)) return -EINVAL; + lock_system_sleep(); + zone = page_zone(pfn_to_page(start_pfn)); node = zone_to_nid(zone); nr_pages = end_pfn - start_pfn; @@ -766,7 +776,7 @@ int offline_pages(unsigned long start_pfn, /* set above range as isolated */ ret = start_isolate_page_range(start_pfn, end_pfn); if (ret) - return ret; + goto out; arg.start_pfn = start_pfn; arg.nr_pages = nr_pages; @@ -844,6 +854,7 @@ repeat: writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); + unlock_system_sleep(); return 0; failed_removal: @@ -853,6 +864,8 @@ failed_removal: /* pushback to free area */ undo_isolate_page_range(start_pfn, end_pfn); +out: + unlock_system_sleep(); return ret; } -- cgit v1.2.3 From 978b4053aefd422713f289f2a315ce2acba62018 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Nov 2009 14:06:24 -0800 Subject: fcntl: rename F_OWNER_GID to F_OWNER_PGRP This is for consistency with various ioctl() operations that include the suffix "PGRP" in their names, and also for consistency with PRIO_PGRP, used with setpriority() and getpriority(). Also, using PGRP instead of GID avoids confusion with the common abbreviation of "group ID". I'm fine with anything that makes it more consistent, and if PGRP is what is the predominant abbreviation then I see no need to further confuse matters by adding a third one. Signed-off-by: Peter Zijlstra Acked-by: Michael Kerrisk Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fcntl.c | 4 ++-- include/asm-generic/fcntl.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/fcntl.c b/fs/fcntl.c index fc089f2f7f56..2cf93ec40a67 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -284,7 +284,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg) type = PIDTYPE_PID; break; - case F_OWNER_GID: + case F_OWNER_PGRP: type = PIDTYPE_PGID; break; @@ -321,7 +321,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg) break; case PIDTYPE_PGID: - owner.type = F_OWNER_GID; + owner.type = F_OWNER_PGRP; break; default: diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h index cd2d7896e34b..495dc8af4044 100644 --- a/include/asm-generic/fcntl.h +++ b/include/asm-generic/fcntl.h @@ -89,7 +89,7 @@ #define F_OWNER_TID 0 #define F_OWNER_PID 1 -#define F_OWNER_GID 2 +#define F_OWNER_PGRP 2 struct f_owner_ex { int type; -- cgit v1.2.3 From 3d7a641e544e428191667e8b1f83f96fa46dbd65 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:23 +0000 Subject: SLOW_WORK: Wait for outstanding work items belonging to a module to clear Wait for outstanding slow work items belonging to a module to clear when unregistering that module as a user of the facility. This prevents the put_ref code of a work item from being taken away before it returns. 
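To make the ownership rule concrete, a minimal hypothetical user of the revised API could look like the following; it mirrors the fscache/gfs2 changes in the diff below (all my_* names are invented, and the trivial get_ref/put_ref suffice here only because this example keeps no reference counts of its own):

    #include <linux/module.h>
    #include <linux/slow-work.h>

    static int my_get_ref(struct slow_work *work) { return 0; }
    static void my_put_ref(struct slow_work *work) { }

    static void my_execute(struct slow_work *work)
    {
    	/* the lengthy, sleep-prone operation runs here */
    }

    static const struct slow_work_ops my_ops = {
    	.owner	 = THIS_MODULE,	/* lets unregistration wait for our items */
    	.get_ref = my_get_ref,
    	.put_ref = my_put_ref,
    	.execute = my_execute,
    };

    static struct slow_work my_work;

    static int __init my_mod_init(void)
    {
    	int ret = slow_work_register_user(THIS_MODULE);

    	if (ret < 0)
    		return ret;
    	slow_work_init(&my_work, &my_ops);
    	ret = slow_work_enqueue(&my_work);
    	if (ret < 0)
    		slow_work_unregister_user(THIS_MODULE);
    	return ret;
    }

    static void __exit my_mod_exit(void)
    {
    	/* blocks until every item owned by this module has cleared */
    	slow_work_unregister_user(THIS_MODULE);
    }

    module_init(my_mod_init);
    module_exit(my_mod_exit);
    MODULE_LICENSE("GPL");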
Signed-off-by: David Howells --- Documentation/slow-work.txt | 13 ++++- fs/fscache/main.c | 6 +- fs/fscache/object.c | 1 + fs/fscache/operation.c | 1 + fs/gfs2/main.c | 4 +- fs/gfs2/recovery.c | 1 + include/linux/slow-work.h | 8 ++- kernel/slow-work.c | 132 ++++++++++++++++++++++++++++++++++++++++++-- 8 files changed, 150 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index ebc50f808ea4..f12fda31dcdc 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -64,9 +64,11 @@ USING SLOW WORK ITEMS Firstly, a module or subsystem wanting to make use of slow work items must register its interest: - int ret = slow_work_register_user(); + int ret = slow_work_register_user(struct module *module); -This will return 0 if successful, or a -ve error upon failure. +This will return 0 if successful, or a -ve error upon failure. The module +pointer should be the module interested in using this facility (almost +certainly THIS_MODULE). Slow work items may then be set up by: @@ -110,7 +112,12 @@ operation. When all a module's slow work items have been processed, and the module has no further interest in the facility, it should unregister its interest: - slow_work_unregister_user(); + slow_work_unregister_user(struct module *module); + +The module pointer is used to wait for all outstanding work items for that +module before completing the unregistration. This prevents the put_ref() code +from being taken away before it completes. module should almost certainly be +THIS_MODULE. =============== diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 4de41b597499..add6bdb53f04 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -48,7 +48,7 @@ static int __init fscache_init(void) { int ret; - ret = slow_work_register_user(); + ret = slow_work_register_user(THIS_MODULE); if (ret < 0) goto error_slow_work; @@ -80,7 +80,7 @@ error_kobj: error_cookie_jar: fscache_proc_cleanup(); error_proc: - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); error_slow_work: return ret; } @@ -97,7 +97,7 @@ static void __exit fscache_exit(void) kobject_put(fscache_root); kmem_cache_destroy(fscache_cookie_jar); fscache_proc_cleanup(); - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); printk(KERN_NOTICE "FS-Cache: Unloaded\n"); } diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 392a41b1b79d..d236eb1d6f37 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -45,6 +45,7 @@ static void fscache_enqueue_dependents(struct fscache_object *); static void fscache_dequeue_object(struct fscache_object *); const struct slow_work_ops fscache_object_slow_work_ops = { + .owner = THIS_MODULE, .get_ref = fscache_object_slow_work_get_ref, .put_ref = fscache_object_slow_work_put_ref, .execute = fscache_object_slow_work_execute, diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index e7f8d53b8b6b..f1a2857b2ff5 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -453,6 +453,7 @@ static void fscache_op_execute(struct slow_work *work) } const struct slow_work_ops fscache_op_slow_work_ops = { + .owner = THIS_MODULE, .get_ref = fscache_op_get_ref, .put_ref = fscache_op_put_ref, .execute = fscache_op_execute, diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index eacd78a5d082..5b31f7741a8f 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void) if (error) goto fail_unregister; - error = slow_work_register_user(); + error 
= slow_work_register_user(THIS_MODULE); if (error) goto fail_slow; @@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void) gfs2_unregister_debugfs(); unregister_filesystem(&gfs2_fs_type); unregister_filesystem(&gfs2meta_fs_type); - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); kmem_cache_destroy(gfs2_quotad_cachep); kmem_cache_destroy(gfs2_rgrpd_cachep); diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 59d2695509d3..b2bb779f09ed 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -593,6 +593,7 @@ fail: } struct slow_work_ops gfs2_recover_ops = { + .owner = THIS_MODULE, .get_ref = gfs2_recover_get_ref, .put_ref = gfs2_recover_put_ref, .execute = gfs2_recover_work, diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b65c8881f07a..9adb2b30754f 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -24,6 +24,9 @@ struct slow_work; * The operations used to support slow work items */ struct slow_work_ops { + /* owner */ + struct module *owner; + /* get a ref on a work item * - return 0 if successful, -ve if not */ @@ -42,6 +45,7 @@ struct slow_work_ops { * queued */ struct slow_work { + struct module *owner; /* the owning module */ unsigned long flags; #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ #define SLOW_WORK_EXECUTING 1 /* item currently executing */ @@ -84,8 +88,8 @@ static inline void vslow_work_init(struct slow_work *work, } extern int slow_work_enqueue(struct slow_work *work); -extern int slow_work_register_user(void); -extern void slow_work_unregister_user(void); +extern int slow_work_register_user(struct module *owner); +extern void slow_work_unregister_user(struct module *owner); #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 0d31135efbf4..dd08f376e406 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -22,6 +22,8 @@ #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after * OOM */ +#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ + static void slow_work_cull_timeout(unsigned long); static void slow_work_oom_timeout(unsigned long); @@ -46,7 +48,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process #ifdef CONFIG_SYSCTL static const int slow_work_min_min_threads = 2; -static int slow_work_max_max_threads = 255; +static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; static const int slow_work_min_vslow = 1; static const int slow_work_max_vslow = 99; @@ -97,6 +99,23 @@ static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0); static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); static struct slow_work slow_work_new_thread; /* new thread starter */ +/* + * slow work ID allocation (use slow_work_queue_lock) + */ +static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); + +/* + * Unregistration tracking to prevent put_ref() from disappearing during module + * unload + */ +#ifdef CONFIG_MODULES +static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; +static struct module *slow_work_unreg_module; +static struct slow_work *slow_work_unreg_work_item; +static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); +static DEFINE_MUTEX(slow_work_unreg_sync_lock); +#endif + /* * The queues of work items and the lock governing access to them. These are * shared between all the CPUs. 
It doesn't make sense to have per-CPU queues @@ -149,8 +168,11 @@ static unsigned slow_work_calc_vsmax(void) * Attempt to execute stuff queued on a slow thread. Return true if we managed * it, false if there was nothing to do. */ -static bool slow_work_execute(void) +static bool slow_work_execute(int id) { +#ifdef CONFIG_MODULES + struct module *module; +#endif struct slow_work *work = NULL; unsigned vsmax; bool very_slow; @@ -186,6 +208,12 @@ static bool slow_work_execute(void) } else { very_slow = false; /* avoid the compiler warning */ } + +#ifdef CONFIG_MODULES + if (work) + slow_work_thread_processing[id] = work->owner; +#endif + spin_unlock_irq(&slow_work_queue_lock); if (!work) @@ -219,7 +247,18 @@ static bool slow_work_execute(void) spin_unlock_irq(&slow_work_queue_lock); } + /* sort out the race between module unloading and put_ref() */ work->ops->put_ref(work); + +#ifdef CONFIG_MODULES + module = slow_work_thread_processing[id]; + slow_work_thread_processing[id] = NULL; + smp_mb(); + if (slow_work_unreg_work_item == work || + slow_work_unreg_module == module) + wake_up_all(&slow_work_unreg_wq); +#endif + return true; auto_requeue: @@ -232,6 +271,7 @@ auto_requeue: else list_add_tail(&work->link, &slow_work_queue); spin_unlock_irq(&slow_work_queue_lock); + slow_work_thread_processing[id] = NULL; return true; } @@ -368,13 +408,22 @@ static inline bool slow_work_available(int vsmax) */ static int slow_work_thread(void *_data) { - int vsmax; + int vsmax, id; DEFINE_WAIT(wait); set_freezable(); set_user_nice(current, -5); + /* allocate ourselves an ID */ + spin_lock_irq(&slow_work_queue_lock); + id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); + BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); + __set_bit(id, slow_work_ids); + spin_unlock_irq(&slow_work_queue_lock); + + sprintf(current->comm, "kslowd%03u", id); + for (;;) { vsmax = vslow_work_proportion; vsmax *= atomic_read(&slow_work_thread_count); @@ -395,7 +444,7 @@ static int slow_work_thread(void *_data) vsmax *= atomic_read(&slow_work_thread_count); vsmax /= 100; - if (slow_work_available(vsmax) && slow_work_execute()) { + if (slow_work_available(vsmax) && slow_work_execute(id)) { cond_resched(); if (list_empty(&slow_work_queue) && list_empty(&vslow_work_queue) && @@ -412,6 +461,10 @@ static int slow_work_thread(void *_data) break; } + spin_lock_irq(&slow_work_queue_lock); + __clear_bit(id, slow_work_ids); + spin_unlock_irq(&slow_work_queue_lock); + if (atomic_dec_and_test(&slow_work_thread_count)) complete_and_exit(&slow_work_last_thread_exited, 0); return 0; @@ -475,6 +528,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) } static const struct slow_work_ops slow_work_new_thread_ops = { + .owner = THIS_MODULE, .get_ref = slow_work_new_thread_get_ref, .put_ref = slow_work_new_thread_put_ref, .execute = slow_work_new_thread_execute, @@ -546,12 +600,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, /** * slow_work_register_user - Register a user of the facility + * @module: The module about to make use of the facility * * Register a user of the facility, starting up the initial threads if there * aren't any other users at this point. This will return 0 if successful, or * an error if not. 
*/ -int slow_work_register_user(void) +int slow_work_register_user(struct module *module) { struct task_struct *p; int loop; @@ -598,14 +653,79 @@ error: } EXPORT_SYMBOL(slow_work_register_user); +/* + * wait for all outstanding items from the calling module to complete + * - note that more items may be queued whilst we're waiting + */ +static void slow_work_wait_for_items(struct module *module) +{ + DECLARE_WAITQUEUE(myself, current); + struct slow_work *work; + int loop; + + mutex_lock(&slow_work_unreg_sync_lock); + add_wait_queue(&slow_work_unreg_wq, &myself); + + for (;;) { + spin_lock_irq(&slow_work_queue_lock); + + /* first of all, we wait for the last queued item in each list + * to be processed */ + list_for_each_entry_reverse(work, &vslow_work_queue, link) { + if (work->owner == module) { + set_current_state(TASK_UNINTERRUPTIBLE); + slow_work_unreg_work_item = work; + goto do_wait; + } + } + list_for_each_entry_reverse(work, &slow_work_queue, link) { + if (work->owner == module) { + set_current_state(TASK_UNINTERRUPTIBLE); + slow_work_unreg_work_item = work; + goto do_wait; + } + } + + /* then we wait for the items being processed to finish */ + slow_work_unreg_module = module; + smp_mb(); + for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { + if (slow_work_thread_processing[loop] == module) + goto do_wait; + } + spin_unlock_irq(&slow_work_queue_lock); + break; /* okay, we're done */ + + do_wait: + spin_unlock_irq(&slow_work_queue_lock); + schedule(); + slow_work_unreg_work_item = NULL; + slow_work_unreg_module = NULL; + } + + remove_wait_queue(&slow_work_unreg_wq, &myself); + mutex_unlock(&slow_work_unreg_sync_lock); +} + /** * slow_work_unregister_user - Unregister a user of the facility + * @module: The module whose items should be cleared * * Unregister a user of the facility, killing all the threads if this was the * last one. + * + * This waits for all the work items belonging to the nominated module to go + * away before proceeding. */ -void slow_work_unregister_user(void) +void slow_work_unregister_user(struct module *module) { + /* first of all, wait for all outstanding items from the calling module + * to complete */ + if (module) + slow_work_wait_for_items(module); + + /* then we can actually go about shutting down the facility if need + * be */ mutex_lock(&slow_work_user_lock); BUG_ON(slow_work_user_count <= 0); -- cgit v1.2.3 From 0160950297c08f8233c89b9f9e7dd59cfb080809 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Nov 2009 18:10:43 +0000 Subject: SLOW_WORK: Add support for cancellation of slow work Add support for cancellation of queued slow work and delayed slow work items. The cancellation functions will wait for items that are pending or undergoing execution to be discarded by the slow work facility. Attempting to enqueue work that is in the process of being cancelled will result in ECANCELED. Signed-off-by: Jens Axboe Signed-off-by: David Howells --- Documentation/slow-work.txt | 12 ++++++- include/linux/slow-work.h | 2 ++ kernel/slow-work.c | 81 +++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 88 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index c655c517fc68..2e384bd4dead 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -108,7 +108,17 @@ on the item, 0 otherwise. The items are reference counted, so there ought to be no need for a flush -operation. When all a module's slow work items have been processed, and the +operation. 
But as the reference counting is optional, means to cancel +existing work items are also included: + + cancel_slow_work(&myitem); + +can be used to cancel pending work. The above cancel function waits for +existing work to have been executed (or prevent execution of them, depending +on timing). + + +When all a module's slow work items have been processed, and the module has no further interest in the facility, it should unregister its interest: diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index 9adb2b30754f..eef20182d5b4 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -51,6 +51,7 @@ struct slow_work { #define SLOW_WORK_EXECUTING 1 /* item currently executing */ #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ +#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ }; @@ -88,6 +89,7 @@ static inline void vslow_work_init(struct slow_work *work, } extern int slow_work_enqueue(struct slow_work *work); +extern void slow_work_cancel(struct slow_work *work); extern int slow_work_register_user(struct module *owner); extern void slow_work_unregister_user(struct module *owner); diff --git a/kernel/slow-work.c b/kernel/slow-work.c index fccf421eb5c1..671cc434532a 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -236,12 +236,17 @@ static bool slow_work_execute(int id) if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) BUG(); - work->ops->execute(work); + /* don't execute if the work is in the process of being cancelled */ + if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) + work->ops->execute(work); if (very_slow) atomic_dec(&vslow_work_executing_count); clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); + /* wake up anyone waiting for this work to be complete */ + wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); + /* if someone tried to enqueue the item whilst we were executing it, * then it'll be left unenqueued to avoid multiple threads trying to * execute it simultaneously @@ -314,11 +319,16 @@ auto_requeue: * allowed to pick items to execute. This ensures that very slow items won't * overly block ones that are just ordinarily slow. * - * Returns 0 if successful, -EAGAIN if not. 
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is + * attempted queued) */ int slow_work_enqueue(struct slow_work *work) { unsigned long flags; + int ret; + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + return -ECANCELED; BUG_ON(slow_work_user_count <= 0); BUG_ON(!work); @@ -335,6 +345,9 @@ int slow_work_enqueue(struct slow_work *work) if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { spin_lock_irqsave(&slow_work_queue_lock, flags); + if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) + goto cancelled; + /* we promise that we will not attempt to execute the work * function in more than one thread simultaneously * @@ -352,8 +365,9 @@ int slow_work_enqueue(struct slow_work *work) if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); } else { - if (slow_work_get_ref(work) < 0) - goto cant_get_ref; + ret = slow_work_get_ref(work); + if (ret < 0) + goto failed; if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -365,12 +379,67 @@ int slow_work_enqueue(struct slow_work *work) } return 0; -cant_get_ref: +cancelled: + ret = -ECANCELED; +failed: spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return -EAGAIN; + return ret; } EXPORT_SYMBOL(slow_work_enqueue); +static int slow_work_wait(void *word) +{ + schedule(); + return 0; +} + +/** + * slow_work_cancel - Cancel a slow work item + * @work: The work item to cancel + * + * This function will cancel a previously enqueued work item. If we cannot + * cancel the work item, it is guarenteed to have run when this function + * returns. + */ +void slow_work_cancel(struct slow_work *work) +{ + bool wait = true, put = false; + + set_bit(SLOW_WORK_CANCELLING, &work->flags); + + spin_lock_irq(&slow_work_queue_lock); + + if (test_bit(SLOW_WORK_PENDING, &work->flags) && + !list_empty(&work->link)) { + /* the link in the pending queue holds a reference on the item + * that we will need to release */ + list_del_init(&work->link); + wait = false; + put = true; + clear_bit(SLOW_WORK_PENDING, &work->flags); + + } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { + /* the executor is holding our only reference on the item, so + * we merely need to wait for it to finish executing */ + clear_bit(SLOW_WORK_PENDING, &work->flags); + } + + spin_unlock_irq(&slow_work_queue_lock); + + /* the EXECUTING flag is set by the executor whilst the spinlock is set + * and before the item is dequeued - so assuming the above doesn't + * actually dequeue it, simply waiting for the EXECUTING flag to be + * released here should be sufficient */ + if (wait) + wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, + TASK_UNINTERRUPTIBLE); + + clear_bit(SLOW_WORK_CANCELLING, &work->flags); + if (put) + slow_work_put_ref(work); +} +EXPORT_SYMBOL(slow_work_cancel); + /* * Schedule a cull of the thread pool at some time in the near future */ -- cgit v1.2.3 From 6b8268b17a1ffc942bc72d7d00274e433d6b6719 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Nov 2009 18:10:47 +0000 Subject: SLOW_WORK: Add delayed_slow_work support This adds support for starting slow work with a delay, similar to the functionality we have for workqueues. 
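As a sketch of how a caller might use the delayed variant (the my_* names are hypothetical; delayed_slow_work_init(), delayed_slow_work_enqueue() and the jiffies-based delay parameter are the interface introduced by the diff below):

    #include <linux/jiffies.h>
    #include <linux/module.h>
    #include <linux/slow-work.h>

    static int my_get_ref(struct slow_work *work) { return 0; }
    static void my_put_ref(struct slow_work *work) { }

    static void my_execute(struct slow_work *work)
    {
    	/* runs in a slow-work thread once the delay has expired */
    }

    static const struct slow_work_ops my_delayed_ops = {
    	.owner	 = THIS_MODULE,
    	.get_ref = my_get_ref,	/* the pending timer holds a reference */
    	.put_ref = my_put_ref,
    	.execute = my_execute,
    };

    static struct delayed_slow_work my_dwork;

    /* queue my_dwork to start executing roughly five seconds from now */
    static int my_kick(void)
    {
    	delayed_slow_work_init(&my_dwork, &my_delayed_ops);
    	return delayed_slow_work_enqueue(&my_dwork, 5 * HZ);
    }

A later delayed_slow_work_cancel(&my_dwork) would then stop the timer, or wait out the item if it is already executing.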
Signed-off-by: Jens Axboe Signed-off-by: David Howells --- Documentation/slow-work.txt | 16 +++++- include/linux/slow-work.h | 29 ++++++++++ kernel/slow-work.c | 129 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 171 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 2e384bd4dead..a9d1b0ffdded 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long. Operations of both types may sleep during execution, thus tying up the thread loaned to it. +A further class of work item is available, based on the slow work item class: + + (*) Delayed slow work items. + +These are slow work items that have a timer to defer queueing of the item for +a while. + THREAD-TO-CLASS ALLOCATION -------------------------- @@ -93,6 +100,10 @@ Slow work items may then be set up by: slow_work_init(&myitem, &myitem_ops); + or: + + delayed_slow_work_init(&myitem, &myitem_ops); + or: vslow_work_init(&myitem, &myitem_ops); @@ -104,7 +115,9 @@ A suitably set up work item can then be enqueued for processing: int ret = slow_work_enqueue(&myitem); This will return a -ve error if the thread pool is unable to gain a reference -on the item, 0 otherwise. +on the item, 0 otherwise, or (for delayed work): + + int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay); The items are reference counted, so there ought to be no need for a flush @@ -112,6 +125,7 @@ operation. But as the reference counting is optional, means to cancel existing work items are also included: cancel_slow_work(&myitem); + cancel_delayed_slow_work(&myitem); can be used to cancel pending work. The above cancel function waits for existing work to have been executed (or prevent execution of them, depending diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index eef20182d5b4..b245b9a9cc0b 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -17,6 +17,7 @@ #ifdef CONFIG_SLOW_WORK #include +#include struct slow_work; @@ -52,10 +53,16 @@ struct slow_work { #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ #define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ +#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ }; +struct delayed_slow_work { + struct slow_work work; + struct timer_list timer; +}; + /** * slow_work_init - Initialise a slow work item * @work: The work item to initialise @@ -71,6 +78,20 @@ static inline void slow_work_init(struct slow_work *work, INIT_LIST_HEAD(&work->link); } +/** + * slow_work_init - Initialise a delayed slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a delayed slow work item. 
+ */ +static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, + const struct slow_work_ops *ops) +{ + init_timer(&dwork->timer); + slow_work_init(&dwork->work, ops); +} + /** * vslow_work_init - Initialise a very slow work item * @work: The work item to initialise @@ -93,6 +114,14 @@ extern void slow_work_cancel(struct slow_work *work); extern int slow_work_register_user(struct module *owner); extern void slow_work_unregister_user(struct module *owner); +extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, + unsigned long delay); + +static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) +{ + slow_work_cancel(&dwork->work); +} + #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; #endif diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 671cc434532a..f67e1daae93d 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -406,11 +406,40 @@ void slow_work_cancel(struct slow_work *work) bool wait = true, put = false; set_bit(SLOW_WORK_CANCELLING, &work->flags); + smp_mb(); + + /* if the work item is a delayed work item with an active timer, we + * need to wait for the timer to finish _before_ getting the spinlock, + * lest we deadlock against the timer routine + * + * the timer routine will leave DELAYED set if it notices the + * CANCELLING flag in time + */ + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { + struct delayed_slow_work *dwork = + container_of(work, struct delayed_slow_work, work); + del_timer_sync(&dwork->timer); + } spin_lock_irq(&slow_work_queue_lock); - if (test_bit(SLOW_WORK_PENDING, &work->flags) && - !list_empty(&work->link)) { + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { + /* the timer routine aborted or never happened, so we are left + * holding the timer's reference on the item and should just + * drop the pending flag and wait for any ongoing execution to + * finish */ + struct delayed_slow_work *dwork = + container_of(work, struct delayed_slow_work, work); + + BUG_ON(timer_pending(&dwork->timer)); + BUG_ON(!list_empty(&work->link)); + + clear_bit(SLOW_WORK_DELAYED, &work->flags); + put = true; + clear_bit(SLOW_WORK_PENDING, &work->flags); + + } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && + !list_empty(&work->link)) { /* the link in the pending queue holds a reference on the item * that we will need to release */ list_del_init(&work->link); @@ -440,6 +469,102 @@ void slow_work_cancel(struct slow_work *work) } EXPORT_SYMBOL(slow_work_cancel); +/* + * Handle expiry of the delay timer, indicating that a delayed slow work item + * should now be queued if not cancelled + */ +static void delayed_slow_work_timer(unsigned long data) +{ + struct slow_work *work = (struct slow_work *) data; + unsigned long flags; + bool queued = false, put = false; + + spin_lock_irqsave(&slow_work_queue_lock, flags); + if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { + clear_bit(SLOW_WORK_DELAYED, &work->flags); + + if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { + /* we discard the reference the timer was holding in + * favour of the one the executor holds */ + set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); + put = true; + } else { + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) + list_add_tail(&work->link, &vslow_work_queue); + else + list_add_tail(&work->link, &slow_work_queue); + queued = true; + } + } + + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + if (put) + slow_work_put_ref(work); + if (queued) + wake_up(&slow_work_thread_wq); +} + +/** + * delayed_slow_work_enqueue - 
Schedule a delayed slow work item for processing + * @dwork: The delayed work item to queue + * @delay: When to start executing the work, in jiffies from now + * + * This is similar to slow_work_enqueue(), but it adds a delay before the work + * is actually queued for processing. + * + * The item can have delayed processing requested on it whilst it is being + * executed. The delay will begin immediately, and if it expires before the + * item finishes executing, the item will be placed back on the queue when it + * has done executing. + */ +int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, + unsigned long delay) +{ + struct slow_work *work = &dwork->work; + unsigned long flags; + int ret; + + if (delay == 0) + return slow_work_enqueue(&dwork->work); + + BUG_ON(slow_work_user_count <= 0); + BUG_ON(!work); + BUG_ON(!work->ops); + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + return -ECANCELED; + + if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { + spin_lock_irqsave(&slow_work_queue_lock, flags); + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + goto cancelled; + + /* the timer holds a reference whilst it is pending */ + ret = work->ops->get_ref(work); + if (ret < 0) + goto cant_get_ref; + + if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) + BUG(); + dwork->timer.expires = jiffies + delay; + dwork->timer.data = (unsigned long) work; + dwork->timer.function = delayed_slow_work_timer; + add_timer(&dwork->timer); + + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + } + + return 0; + +cancelled: + ret = -ECANCELED; +cant_get_ref: + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + return ret; +} +EXPORT_SYMBOL(delayed_slow_work_enqueue); + /* * Schedule a cull of the thread pool at some time in the near future */ -- cgit v1.2.3 From 8fba10a42d191de612e60e7009c8f0313f90a9b3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:51 +0000 Subject: SLOW_WORK: Allow the work items to be viewed through a /proc file Allow the executing and queued work items to be viewed through a /proc file for debugging purposes. 
The contents look something like the following:

	THR PID   ITEM ADDR        FL MARK  DESC
	=== ===== ================ == ===== ==========
	  0  3005 ffff880023f52348  a 952ms FSC: OBJ17d3: LOOK
	  1  3006 ffff880024e33668  2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
	  2  3165 ffff8800296dd180  a 424ms FSC: OBJ17e4: LOOK
	  3  4089 ffff8800262c8d78  a 212ms FSC: OBJ17ea: CRTN
	  4  4090 ffff88002792bed8  2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
	  5  4092 ffff88002a0ef308  2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
	  6  4094 ffff88002abaf4b8  2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
	  7  4095 ffff88002bb188e0  a 388ms FSC: OBJ17e9: CRTN
	vsq     - ffff880023d99668  1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
	vsq     - ffff8800295d1740  1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
	vsq     - ffff880025ba3308  1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
	vsq     - ffff880024ec83e0  1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
	vsq     - ffff880026618e00  1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
	vsq     - ffff880025a2a4b8  1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
	vsq     - ffff880023cbe6d8  9 212ms FSC: OBJ17eb: LOOK
	vsq     - ffff880024d37590  9 212ms FSC: OBJ17ec: LOOK
	vsq     - ffff880027746cb0  9 212ms FSC: OBJ17ed: LOOK
	vsq     - ffff880024d37ae8  9 212ms FSC: OBJ17ee: LOOK
	vsq     - ffff880024d37cb0  9 212ms FSC: OBJ17ef: LOOK
	vsq     - ffff880025036550  9 212ms FSC: OBJ17f0: LOOK
	vsq     - ffff8800250368e0  9 212ms FSC: OBJ17f1: LOOK
	vsq     - ffff880025036aa8  9 212ms FSC: OBJ17f2: LOOK

In the 'THR' column, executing items show the thread they're occupying and
queued items indicate which queue they're on.  'PID' shows the process ID of
a slow-work thread that's executing something.  'FL' shows the work item
flags.  'MARK' indicates how long since an item was queued or began
executing.  Lastly, the 'DESC' column permits the owner of an item to give
some information.

Signed-off-by: David Howells
---
 Documentation/slow-work.txt |   60 +++++++++++-
 include/linux/slow-work.h   |   11 +++
 init/Kconfig                |   10 ++
 kernel/Makefile             |    1 +
 kernel/slow-work-proc.c     |  227 ++++++++++++++++++++++++++++++++++++++++
 kernel/slow-work.c          |   44 ++++++---
 kernel/slow-work.h          |   72 ++++++++++++++
 7 files changed, 413 insertions(+), 12 deletions(-)
 create mode 100644 kernel/slow-work-proc.c
 create mode 100644 kernel/slow-work.h
(limited to 'include')

diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index a9d1b0ffdded..f120238e70fe 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -149,7 +149,8 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-Only ->execute() is required, getting and putting of a reference are optional.
+Only ->execute() is required; the getting and putting of a reference and the
+describing of an item are all optional.
 
  (*) Get a reference on an item:
 
@@ -179,6 +180,16 @@ Only ->execute() is required, getting and putting of a reference are optional.
     This should perform the work required of the item.  It may sleep, it may
     perform disk I/O and it may wait for locks.
 
+ (*) View an item through /proc:
+
+	void (*desc)(struct slow_work *work, struct seq_file *m);
+
+     If supplied, this should print to 'm' a small string describing the work
+     the item is to do.  This should be no more than about 40 characters, and
+     shouldn't include a newline character.
+
+     See the 'Viewing executing and queued items' section below.
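As a minimal sketch of such a desc() implementation (an illustration only,
not part of this patch; my_fs_work, my_fs_execute, my_fs_desc and the "MYFS"
tag are invented names):

	#include <linux/module.h>
	#include <linux/seq_file.h>
	#include <linux/slow-work.h>

	struct my_fs_work {
		struct slow_work work;
		unsigned debug_id;	/* hypothetical per-item ID */
		const char *state;	/* hypothetical state tag */
	};

	static void my_fs_execute(struct slow_work *work)
	{
		/* the long-running, sleepable part of the job goes here */
	}

	/* keep this short: the DESC column is only about 40 characters wide,
	 * and the core prints the terminating newline itself */
	static void my_fs_desc(struct slow_work *work, struct seq_file *m)
	{
		struct my_fs_work *w =
			container_of(work, struct my_fs_work, work);

		seq_printf(m, "MYFS: OBJ%x: %s", w->debug_id, w->state);
	}

	static const struct slow_work_ops my_fs_ops = {
		.owner		= THIS_MODULE,
		.execute	= my_fs_execute,
	#ifdef CONFIG_SLOW_WORK_PROC
		.desc		= my_fs_desc,
	#endif
	};

Guarding .desc with CONFIG_SLOW_WORK_PROC mirrors what the slow-work core
itself does for its new-thread item, so the field is only referenced when the
structure member actually exists.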
+
 
 ==================
 POOL CONFIGURATION
@@ -203,3 +214,50 @@ The slow-work thread pool has a number of configurables:
    is bounded to between 1 and one fewer than the number of active threads.
    This ensures there is always at least one thread that can process very
    slow work items, and always at least one thread that won't.
+
+
+==================================
+VIEWING EXECUTING AND QUEUED ITEMS
+==================================
+
+If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available:
+
+	/proc/slow_work_rq
+
+through which the list of work items being executed and the queues of items to
+be executed may be viewed.  The owner of a work item is given the chance to
+add some information of its own.
+
+The contents look something like the following:
+
+	THR PID   ITEM ADDR        FL MARK  DESC
+	=== ===== ================ == ===== ==========
+	  0  3005 ffff880023f52348  a 952ms FSC: OBJ17d3: LOOK
+	  1  3006 ffff880024e33668  2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
+	  2  3165 ffff8800296dd180  a 424ms FSC: OBJ17e4: LOOK
+	  3  4089 ffff8800262c8d78  a 212ms FSC: OBJ17ea: CRTN
+	  4  4090 ffff88002792bed8  2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
+	  5  4092 ffff88002a0ef308  2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
+	  6  4094 ffff88002abaf4b8  2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
+	  7  4095 ffff88002bb188e0  a 388ms FSC: OBJ17e9: CRTN
+	vsq     - ffff880023d99668  1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
+	vsq     - ffff8800295d1740  1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
+	vsq     - ffff880025ba3308  1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
+	vsq     - ffff880024ec83e0  1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
+	vsq     - ffff880026618e00  1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
+	vsq     - ffff880025a2a4b8  1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
+	vsq     - ffff880023cbe6d8  9 212ms FSC: OBJ17eb: LOOK
+	vsq     - ffff880024d37590  9 212ms FSC: OBJ17ec: LOOK
+	vsq     - ffff880027746cb0  9 212ms FSC: OBJ17ed: LOOK
+	vsq     - ffff880024d37ae8  9 212ms FSC: OBJ17ee: LOOK
+	vsq     - ffff880024d37cb0  9 212ms FSC: OBJ17ef: LOOK
+	vsq     - ffff880025036550  9 212ms FSC: OBJ17f0: LOOK
+	vsq     - ffff8800250368e0  9 212ms FSC: OBJ17f1: LOOK
+	vsq     - ffff880025036aa8  9 212ms FSC: OBJ17f2: LOOK
+
+In the 'THR' column, executing items show the thread they're occupying and
+queued items indicate which queue they're on.  'PID' shows the process ID of
+a slow-work thread that's executing something.  'FL' shows the work item flags.
+'MARK' indicates how long since an item was queued or began executing.  Lastly,
+the 'DESC' column permits the owner of an item to give some information.
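For orientation, a hedged sketch of a user whose item would show up in those
queue columns (my_dwork, my_ops, my_execute and my_kick are invented names; it
assumes slow_work_register_user(THIS_MODULE) has already been called, and it
reuses the delayed_slow_work API added earlier in this series):

	#include <linux/jiffies.h>
	#include <linux/module.h>
	#include <linux/slow-work.h>

	static struct delayed_slow_work my_dwork;

	static void my_execute(struct slow_work *work)
	{
		/* long-running, sleepable work goes here */
	}

	static const struct slow_work_ops my_ops = {
		.owner		= THIS_MODULE,
		.execute	= my_execute,
	};

	static int my_kick(void)
	{
		delayed_slow_work_init(&my_dwork, &my_ops);

		/* fires in roughly five seconds; once the timer expires the
		 * item is queued and appears with an 'sq' THR marker until a
		 * thread picks it up */
		return delayed_slow_work_enqueue(&my_dwork, 5 * HZ);
	}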
+ diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b245b9a9cc0b..f41485145ed1 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -20,6 +20,9 @@ #include struct slow_work; +#ifdef CONFIG_SLOW_WORK_PROC +struct seq_file; +#endif /* * The operations used to support slow work items @@ -38,6 +41,11 @@ struct slow_work_ops { /* execute a work item */ void (*execute)(struct slow_work *work); + +#ifdef CONFIG_SLOW_WORK_PROC + /* describe a work item for /proc */ + void (*desc)(struct slow_work *work, struct seq_file *m); +#endif }; /* @@ -56,6 +64,9 @@ struct slow_work { #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ +#ifdef CONFIG_SLOW_WORK_PROC + struct timespec mark; /* jiffies at which queued or exec begun */ +#endif }; struct delayed_slow_work { diff --git a/init/Kconfig b/init/Kconfig index 9e03ef8b311e..ab5c64801fe5 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1098,6 +1098,16 @@ config SLOW_WORK See Documentation/slow-work.txt. +config SLOW_WORK_PROC + bool "Slow work debugging through /proc" + default n + depends on SLOW_WORK && PROC_FS + help + Display the contents of the slow work run queue through /proc, + including items currently executing. + + See Documentation/slow-work.txt. + endmenu # General setup config HAVE_GENERIC_DMA_COHERENT diff --git a/kernel/Makefile b/kernel/Makefile index b8d4cd8ac0b9..776ffed1556d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o +obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c new file mode 100644 index 000000000000..3988032571f5 --- /dev/null +++ b/kernel/slow-work-proc.c @@ -0,0 +1,227 @@ +/* Slow work debugging + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include "slow-work.h" + +#define ITERATOR_SHIFT (BITS_PER_LONG - 4) +#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) +#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) + +void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) +{ + seq_puts(m, "Slow-work: New thread"); +} + +/* + * Render the time mark field on a work item into a 5-char time with units plus + * a space + */ +static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) +{ + struct timespec now, diff; + + now = CURRENT_TIME; + diff = timespec_sub(now, work->mark); + + if (diff.tv_sec < 0) + seq_puts(m, " -ve "); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) + seq_printf(m, "%3luns ", diff.tv_nsec); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) + seq_printf(m, "%3luus ", diff.tv_nsec / 1000); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) + seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); + else if (diff.tv_sec <= 1) + seq_puts(m, " 1s "); + else if (diff.tv_sec < 60) + seq_printf(m, "%4lus ", diff.tv_sec); + else if (diff.tv_sec < 60 * 60) + seq_printf(m, "%4lum ", diff.tv_sec / 60); + else if (diff.tv_sec < 60 * 60 * 24) + seq_printf(m, "%4luh ", diff.tv_sec / 3600); + else + seq_puts(m, "exces "); +} + +/* + * Describe a slow work item for /proc + */ +static int slow_work_runqueue_show(struct seq_file *m, void *v) +{ + struct slow_work *work; + struct list_head *p = v; + unsigned long id; + + switch ((unsigned long) v) { + case 1: + seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); + return 0; + case 2: + seq_puts(m, "=== ===== ================ == ===== ==========\n"); + return 0; + + case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: + id = (unsigned long) v - 3; + + read_lock(&slow_work_execs_lock); + work = slow_work_execs[id]; + if (work) { + smp_read_barrier_depends(); + + seq_printf(m, "%3lu %5d %16p %2lx ", + id, slow_work_pids[id], work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + } + read_unlock(&slow_work_execs_lock); + return 0; + + default: + work = list_entry(p, struct slow_work, link); + seq_printf(m, "%3s - %16p %2lx ", + work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", + work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + return 0; + } +} + +/* + * map the iterator to a work item + */ +static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) +{ + struct list_head *p; + unsigned long count, id; + + switch (*_pos >> ITERATOR_SHIFT) { + case 0x0: + if (*_pos == 0) + *_pos = 1; + if (*_pos < 3) + return (void *)(unsigned long) *_pos; + if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) + for (id = *_pos - 3; + id < SLOW_WORK_THREAD_LIMIT; + id++, (*_pos)++) + if (slow_work_execs[id]) + return (void *)(unsigned long) *_pos; + *_pos = 0x1UL << ITERATOR_SHIFT; + + case 0x1: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &slow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + + case 0x2: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &vslow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * set up the iterator to start reading from the first line + */ +static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) +{ + spin_lock_irq(&slow_work_queue_lock); + return slow_work_runqueue_index(m, _pos); +} + +/* + * move to the next line + */ +static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) +{ + struct list_head *p = v; + unsigned long selector = *_pos >> ITERATOR_SHIFT; + + (*_pos)++; + switch (selector) { + case 0x0: + return slow_work_runqueue_index(m, _pos); + + case 0x1: + if (*_pos >> ITERATOR_SHIFT == 0x1) { + p = p->next; + if (p != &slow_work_queue) + return p; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + p = &vslow_work_queue; + + case 0x2: + if (*_pos >> ITERATOR_SHIFT == 0x2) { + p = p->next; + if (p != &vslow_work_queue) + return p; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * clean up after reading + */ +static void slow_work_runqueue_stop(struct seq_file *m, void *v) +{ + spin_unlock_irq(&slow_work_queue_lock); +} + +static const struct seq_operations slow_work_runqueue_ops = { + .start = slow_work_runqueue_start, + .stop = slow_work_runqueue_stop, + .next = slow_work_runqueue_next, + .show = slow_work_runqueue_show, +}; + +/* + * open "/proc/slow_work_rq" to list queue contents + */ +static int slow_work_runqueue_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &slow_work_runqueue_ops); +} + +const struct file_operations slow_work_runqueue_fops = { + .owner = THIS_MODULE, + .open = slow_work_runqueue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index f67e1daae93d..b763bc2d2670 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -16,13 +16,8 @@ #include #include #include - -#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of - * things to do */ -#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after - * OOM */ - -#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ +#include +#include "slow-work.h" static void slow_work_cull_timeout(unsigned long); static void slow_work_oom_timeout(unsigned long); @@ -116,6 +111,15 @@ static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); static DEFINE_MUTEX(slow_work_unreg_sync_lock); #endif +/* + * Data for tracking currently executing items for indication through /proc + */ +#ifdef CONFIG_SLOW_WORK_PROC 
+struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; +pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; +DEFINE_RWLOCK(slow_work_execs_lock); +#endif + /* * The queues of work items and the lock governing access to them. These are * shared between all the CPUs. It doesn't make sense to have per-CPU queues @@ -124,9 +128,9 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock); * There are two queues of work items: one for slow work items, and one for * very slow work items. */ -static LIST_HEAD(slow_work_queue); -static LIST_HEAD(vslow_work_queue); -static DEFINE_SPINLOCK(slow_work_queue_lock); +LIST_HEAD(slow_work_queue); +LIST_HEAD(vslow_work_queue); +DEFINE_SPINLOCK(slow_work_queue_lock); /* * The thread controls. A variable used to signal to the threads that they @@ -182,7 +186,7 @@ static unsigned slow_work_calc_vsmax(void) * Attempt to execute stuff queued on a slow thread. Return true if we managed * it, false if there was nothing to do. */ -static bool slow_work_execute(int id) +static noinline bool slow_work_execute(int id) { #ifdef CONFIG_MODULES struct module *module; @@ -227,6 +231,10 @@ static bool slow_work_execute(int id) if (work) slow_work_thread_processing[id] = work->owner; #endif + if (work) { + slow_work_mark_time(work); + slow_work_begin_exec(id, work); + } spin_unlock_irq(&slow_work_queue_lock); @@ -247,6 +255,8 @@ static bool slow_work_execute(int id) /* wake up anyone waiting for this work to be complete */ wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); + slow_work_end_exec(id, work); + /* if someone tried to enqueue the item whilst we were executing it, * then it'll be left unenqueued to avoid multiple threads trying to * execute it simultaneously @@ -285,6 +295,7 @@ auto_requeue: * - we transfer our ref on the item back to the appropriate queue * - don't wake another thread up as we're awake already */ + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -368,6 +379,7 @@ int slow_work_enqueue(struct slow_work *work) ret = slow_work_get_ref(work); if (ret < 0) goto failed; + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -489,6 +501,7 @@ static void delayed_slow_work_timer(unsigned long data) set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); put = true; } else { + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -627,6 +640,7 @@ static int slow_work_thread(void *_data) id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); __set_bit(id, slow_work_ids); + slow_work_set_thread_pid(id, current->pid); spin_unlock_irq(&slow_work_queue_lock); sprintf(current->comm, "kslowd%03u", id); @@ -669,6 +683,7 @@ static int slow_work_thread(void *_data) } spin_lock_irq(&slow_work_queue_lock); + slow_work_set_thread_pid(id, 0); __clear_bit(id, slow_work_ids); spin_unlock_irq(&slow_work_queue_lock); @@ -722,6 +737,9 @@ static void slow_work_new_thread_execute(struct slow_work *work) static const struct slow_work_ops slow_work_new_thread_ops = { .owner = THIS_MODULE, .execute = slow_work_new_thread_execute, +#ifdef CONFIG_SLOW_WORK_PROC + .desc = slow_work_new_thread_desc, +#endif }; /* @@ -948,6 +966,10 @@ static int __init init_slow_work(void) #ifdef CONFIG_SYSCTL if (slow_work_max_max_threads < nr_cpus * 2) slow_work_max_max_threads = nr_cpus * 2; +#endif +#ifdef CONFIG_SLOW_WORK_PROC 
+ proc_create("slow_work_rq", S_IFREG | 0400, NULL, + &slow_work_runqueue_fops); #endif return 0; } diff --git a/kernel/slow-work.h b/kernel/slow-work.h new file mode 100644 index 000000000000..3c2f007f3ad6 --- /dev/null +++ b/kernel/slow-work.h @@ -0,0 +1,72 @@ +/* Slow work private definitions + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of + * things to do */ +#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after + * OOM */ + +#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ + +/* + * slow-work.c + */ +#ifdef CONFIG_SLOW_WORK_PROC +extern struct slow_work *slow_work_execs[]; +extern pid_t slow_work_pids[]; +extern rwlock_t slow_work_execs_lock; +#endif + +extern struct list_head slow_work_queue; +extern struct list_head vslow_work_queue; +extern spinlock_t slow_work_queue_lock; + +/* + * slow-work-proc.c + */ +#ifdef CONFIG_SLOW_WORK_PROC +extern const struct file_operations slow_work_runqueue_fops; + +extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); +#endif + +/* + * Helper functions + */ +static inline void slow_work_set_thread_pid(int id, pid_t pid) +{ +#ifdef CONFIG_SLOW_WORK_PROC + slow_work_pids[id] = pid; +#endif +} + +static inline void slow_work_mark_time(struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + work->mark = CURRENT_TIME; +#endif +} + +static inline void slow_work_begin_exec(int id, struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + slow_work_execs[id] = work; +#endif +} + +static inline void slow_work_end_exec(int id, struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + write_lock(&slow_work_execs_lock); + slow_work_execs[id] = NULL; + write_unlock(&slow_work_execs_lock); +#endif +} -- cgit v1.2.3 From 31ba99d304494cb28fa8671ccc769c5543e1165d Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:53 +0000 Subject: SLOW_WORK: Allow the owner of a work item to determine if it is queued or not Add a function (slow_work_is_queued()) to permit the owner of a work item to determine if the item is queued or not. The work item is counted as being queued if it is actually on the queue, not just if it is pending. If it is executing and pending, then it is not on the queue, but will rather be put back on the queue when execution finishes. This permits a caller to quickly work out if it may be able to put another, dependent work item on the queue behind it, or whether it will have to wait till that is finished. This can be used by CacheFiles to work out whether the creation a new object can be immediately deferred when it has to wait for an old object to be deleted, or whether a wait must take place. If a wait is necessary, then the slow-work thread can otherwise get blocked, preventing the deletion from taking place. 
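A sketch of the calling pattern this is meant to support (illustrative only;
my_queue_dependent() and the 'deletion' and 'creation' item names are invented
stand-ins for the two work items described above):

	/* try to queue 'creation' behind 'deletion'; returns false if the
	 * caller must instead wait for 'deletion' to finish executing */
	static bool my_queue_dependent(struct slow_work *deletion,
				       struct slow_work *creation)
	{
		if (!slow_work_is_queued(deletion))
			return false;

		return slow_work_enqueue(creation) == 0;
	}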
Signed-off-by: David Howells --- Documentation/slow-work.txt | 15 +++++++++++++++ include/linux/slow-work.h | 19 +++++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index f120238e70fe..0169c9d9dd16 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -144,6 +144,21 @@ from being taken away before it completes. module should almost certainly be THIS_MODULE. +================ +HELPER FUNCTIONS +================ + +The slow-work facility provides a function by which it can be determined +whether or not an item is queued for later execution: + + bool queued = slow_work_is_queued(struct slow_work *work); + +If it returns false, then the item is not on the queue (it may be executing +with a requeue pending). This can be used to work out whether an item on which +another depends is on the queue, thus allowing a dependent item to be queued +after it. + + =============== ITEM OPERATIONS =============== diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index f41485145ed1..bfd3ab4c8898 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -120,6 +120,25 @@ static inline void vslow_work_init(struct slow_work *work, INIT_LIST_HEAD(&work->link); } +/** + * slow_work_is_queued - Determine if a slow work item is on the work queue + * work: The work item to test + * + * Determine if the specified slow-work item is on the work queue. This + * returns true if it is actually on the queue. + * + * If the item is executing and has been marked for requeue when execution + * finishes, then false will be returned. + * + * Anyone wishing to wait for completion of execution can wait on the + * SLOW_WORK_EXECUTING bit. + */ +static inline bool slow_work_is_queued(struct slow_work *work) +{ + unsigned long flags = work->flags; + return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); +} + extern int slow_work_enqueue(struct slow_work *work); extern void slow_work_cancel(struct slow_work *work); extern int slow_work_register_user(struct module *owner); -- cgit v1.2.3 From 3bde31a4ac225cb5805be02eff6eaaf7e0766ccd Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:57 +0000 Subject: SLOW_WORK: Allow a requeueable work item to sleep till the thread is needed Add a function to allow a requeueable work item to sleep till the thread processing it is needed by the slow-work facility to perform other work. Sometimes a work item can't progress immediately, but must wait for the completion of another work item that's currently being processed by another slow-work thread. In some circumstances, the waiting item could instead - theoretically - put itself back on the queue and yield its thread back to the slow-work facility, thus waiting till it gets processing time again before attempting to progress. This would allow other work items processing time on that thread. However, this only works if there is something on the queue for it to queue behind - otherwise it will just get a thread again immediately, and will end up cycling between the queue and the thread, eating up valuable CPU time. So, slow_work_sleep_till_thread_needed() is provided such that an item can put itself on a wait queue that will wake it up when the event it is actually interested in occurs, then call this function in lieu of calling schedule(). This function will then sleep until either the item's event occurs or another work item appears on the queue. 
If another work item is queued, but the item's event hasn't occurred, then the
work item should requeue itself and yield the thread back to the slow-work
facility by returning.

This can be used by CacheFiles for an object that is being created on one
thread to wait for an object being deleted on another thread where there is
nothing on the queue for the creation to go and wait behind.  As soon as an
item appears on the queue that could be given thread time instead, CacheFiles
can stick the creating object back on the queue and return to the slow-work
facility - assuming the object deletion didn't also complete.

Signed-off-by: David Howells
---
 Documentation/slow-work.txt |   44 +++++++++++++++++++++
 include/linux/slow-work.h   |    3 ++
 kernel/slow-work.c          |   94 ++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 132 insertions(+), 9 deletions(-)
(limited to 'include')

diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index 0169c9d9dd16..52bc31433723 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -158,6 +158,50 @@ with a requeue pending).  This can be used to work out whether an item on which
 another depends is on the queue, thus allowing a dependent item to be queued
 after it.
 
+If the above shows an item on which another depends not to be queued, then the
+owner of the dependent item might need to wait.  However, to avoid locking up
+the threads unnecessarily by sleeping in them, it can make sense under some
+circumstances to return the work item to the queue, thus deferring it until
+some other items have had a chance to make use of the yielded thread.
+
+To yield a thread and defer an item, the work function should simply enqueue
+the work item again and return.  However, this doesn't work if there's nothing
+actually on the queue, as the thread just vacated will jump straight back into
+the item's work function, thus busy waiting on a CPU.
+
+Instead, the item should use the thread to wait for the dependency to go away,
+but rather than using schedule() or schedule_timeout() to sleep, it should use
+the following function:
+
+	bool requeue = slow_work_sleep_till_thread_needed(
+			struct slow_work *work,
+			signed long *_timeout);
+
+This will add a second wait and then sleep, such that it will be woken up if
+either something appears on the queue that could usefully make use of the
+thread - and behind which this item can be queued, or if the event the caller
+set up to wait for happens.  True will be returned if something else appeared
+on the queue and this work function should perhaps return, or false if
+something else woke it up.  The timeout is as for schedule_timeout().
+
+For example:
+
+	wq = bit_waitqueue(&my_flags, MY_BIT);
+	init_wait(&wait);
+	requeue = false;
+	do {
+		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (!test_bit(MY_BIT, &my_flags))
+			break;
+		requeue = slow_work_sleep_till_thread_needed(&my_work,
+							     &timeout);
+	} while (timeout > 0 && !requeue);
+	finish_wait(wq, &wait);
+	if (!test_bit(MY_BIT, &my_flags))
+		goto do_my_thing;
+	if (requeue)
+		return; // to slow_work
+
 
 ===============
 ITEM OPERATIONS
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index bfd3ab4c8898..5035a2691739 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -152,6 +152,9 @@ static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
 	slow_work_cancel(&dwork->work);
 }
 
+extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+					       signed long *_timeout);
+
 #ifdef CONFIG_SYSCTL
 extern ctl_table slow_work_sysctls[];
 #endif
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index b763bc2d2670..da94f3c101af 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -132,6 +132,15 @@ LIST_HEAD(slow_work_queue);
 LIST_HEAD(vslow_work_queue);
 DEFINE_SPINLOCK(slow_work_queue_lock);
 
+/*
+ * The following are two wait queues that get pinged when a work item is placed
+ * on an empty queue.  These allow work items that are hogging a thread by
+ * sleeping in a way that could be deferred to yield their thread and enqueue
+ * themselves.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
+static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
+
 /*
  * The thread controls.  A variable used to signal to the threads that they
@@ -305,6 +314,50 @@ auto_requeue:
 	return true;
 }
 
+/**
+ * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
+ * @work: The work item under execution that wants to sleep
+ * @_timeout: Scheduler sleep timeout
+ *
+ * Allow a requeueable work item to sleep on a slow-work processor thread until
+ * that thread is needed to do some other work or the sleep is interrupted by
+ * some other event.
+ *
+ * The caller must set up a wake up event before calling this and must have set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
+ * condition before calling this function as no test is made here.
+ * + * False is returned if there is nothing on the queue; true is returned if the + * work item should be requeued + */ +bool slow_work_sleep_till_thread_needed(struct slow_work *work, + signed long *_timeout) +{ + wait_queue_head_t *wfo_wq; + struct list_head *queue; + + DEFINE_WAIT(wait); + + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } + + if (!list_empty(queue)) + return true; + + add_wait_queue_exclusive(wfo_wq, &wait); + if (list_empty(queue)) + *_timeout = schedule_timeout(*_timeout); + finish_wait(wfo_wq, &wait); + + return !list_empty(queue); +} +EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); + /** * slow_work_enqueue - Schedule a slow work item for processing * @work: The work item to queue @@ -335,6 +388,8 @@ auto_requeue: */ int slow_work_enqueue(struct slow_work *work) { + wait_queue_head_t *wfo_wq; + struct list_head *queue; unsigned long flags; int ret; @@ -354,6 +409,14 @@ int slow_work_enqueue(struct slow_work *work) * maintaining our promise */ if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } + spin_lock_irqsave(&slow_work_queue_lock, flags); if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) @@ -380,11 +443,13 @@ int slow_work_enqueue(struct slow_work *work) if (ret < 0) goto failed; slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); + list_add_tail(&work->link, queue); wake_up(&slow_work_thread_wq); + + /* if someone who could be requeued is sleeping on a + * thread, then ask them to yield their thread */ + if (work->link.prev == queue) + wake_up(wfo_wq); } spin_unlock_irqrestore(&slow_work_queue_lock, flags); @@ -487,9 +552,19 @@ EXPORT_SYMBOL(slow_work_cancel); */ static void delayed_slow_work_timer(unsigned long data) { + wait_queue_head_t *wfo_wq; + struct list_head *queue; struct slow_work *work = (struct slow_work *) data; unsigned long flags; - bool queued = false, put = false; + bool queued = false, put = false, first = false; + + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } spin_lock_irqsave(&slow_work_queue_lock, flags); if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { @@ -502,17 +577,18 @@ static void delayed_slow_work_timer(unsigned long data) put = true; } else { slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); + list_add_tail(&work->link, queue); queued = true; + if (work->link.prev == queue) + first = true; } } spin_unlock_irqrestore(&slow_work_queue_lock, flags); if (put) slow_work_put_ref(work); + if (first) + wake_up(wfo_wq); if (queued) wake_up(&slow_work_thread_wq); } -- cgit v1.2.3 From 440f0affe247e9990c8f8778f1861da4fd7d5e50 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:01 +0000 Subject: FS-Cache: Annotate slow-work runqueue proc lines for FS-Cache 
work items Annotate slow-work runqueue proc lines for FS-Cache work items. Objects include the object ID and the state. Operations include the object ID, the operation ID and the operation type and state. Signed-off-by: David Howells --- fs/fscache/object.c | 41 ++++++++++++++++++++++++++++++++++++++++- fs/fscache/operation.c | 29 +++++++++++++++++++++++++++++ fs/fscache/page.c | 27 ++++++++++++++++++++++----- include/linux/fscache-cache.h | 12 ++++++++++++ 4 files changed, 103 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/fscache/object.c b/fs/fscache/object.c index d236eb1d6f37..615b63dd9ecc 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -14,9 +14,10 @@ #define FSCACHE_DEBUG_LEVEL COOKIE #include +#include #include "internal.h" -const char *fscache_object_states[] = { +const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", @@ -33,9 +34,28 @@ const char *fscache_object_states[] = { }; EXPORT_SYMBOL(fscache_object_states); +static const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { + [FSCACHE_OBJECT_INIT] = "INIT", + [FSCACHE_OBJECT_LOOKING_UP] = "LOOK", + [FSCACHE_OBJECT_CREATING] = "CRTN", + [FSCACHE_OBJECT_AVAILABLE] = "AVBL", + [FSCACHE_OBJECT_ACTIVE] = "ACTV", + [FSCACHE_OBJECT_UPDATING] = "UPDT", + [FSCACHE_OBJECT_DYING] = "DYNG", + [FSCACHE_OBJECT_LC_DYING] = "LCDY", + [FSCACHE_OBJECT_ABORT_INIT] = "ABTI", + [FSCACHE_OBJECT_RELEASING] = "RELS", + [FSCACHE_OBJECT_RECYCLING] = "RCYC", + [FSCACHE_OBJECT_WITHDRAWING] = "WTHD", + [FSCACHE_OBJECT_DEAD] = "DEAD", +}; + static void fscache_object_slow_work_put_ref(struct slow_work *); static int fscache_object_slow_work_get_ref(struct slow_work *); static void fscache_object_slow_work_execute(struct slow_work *); +#ifdef CONFIG_SLOW_WORK_PROC +static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *); +#endif static void fscache_initialise_object(struct fscache_object *); static void fscache_lookup_object(struct fscache_object *); static void fscache_object_available(struct fscache_object *); @@ -49,6 +69,9 @@ const struct slow_work_ops fscache_object_slow_work_ops = { .get_ref = fscache_object_slow_work_get_ref, .put_ref = fscache_object_slow_work_put_ref, .execute = fscache_object_slow_work_execute, +#ifdef CONFIG_SLOW_WORK_PROC + .desc = fscache_object_slow_work_desc, +#endif }; EXPORT_SYMBOL(fscache_object_slow_work_ops); @@ -326,6 +349,22 @@ static void fscache_object_slow_work_execute(struct slow_work *work) fscache_enqueue_object(object); } +/* + * describe an object for slow-work debugging + */ +#ifdef CONFIG_SLOW_WORK_PROC +static void fscache_object_slow_work_desc(struct slow_work *work, + struct seq_file *m) +{ + struct fscache_object *object = + container_of(work, struct fscache_object, work); + + seq_printf(m, "FSC: OBJ%x: %s", + object->debug_id, + fscache_object_states_short[object->state]); +} +#endif + /* * initialise an object * - check the specified object's parent to see if we can make use of it diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index f1a2857b2ff5..91bbe6f0377c 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -13,6 +13,7 @@ #define FSCACHE_DEBUG_LEVEL OPERATION #include +#include #include "internal.h" atomic_t fscache_op_debug_id; @@ -31,6 +32,8 @@ void fscache_enqueue_operation(struct fscache_operation *op) _enter("{OBJ%x OP%x,%u}", op->object->debug_id, 
op->debug_id, atomic_read(&op->usage)); + fscache_set_op_state(op, "EnQ"); + ASSERT(op->processor != NULL); ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); ASSERTCMP(atomic_read(&op->usage), >, 0); @@ -67,6 +70,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation); static void fscache_run_op(struct fscache_object *object, struct fscache_operation *op) { + fscache_set_op_state(op, "Run"); + object->n_in_progress++; if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) wake_up_bit(&op->flags, FSCACHE_OP_WAITING); @@ -87,6 +92,8 @@ int fscache_submit_exclusive_op(struct fscache_object *object, _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); + fscache_set_op_state(op, "SubmitX"); + spin_lock(&object->lock); ASSERTCMP(object->n_ops, >=, object->n_in_progress); ASSERTCMP(object->n_ops, >=, object->n_exclusive); @@ -190,6 +197,8 @@ int fscache_submit_op(struct fscache_object *object, ASSERTCMP(atomic_read(&op->usage), >, 0); + fscache_set_op_state(op, "Submit"); + spin_lock(&object->lock); ASSERTCMP(object->n_ops, >=, object->n_in_progress); ASSERTCMP(object->n_ops, >=, object->n_exclusive); @@ -298,6 +307,8 @@ void fscache_put_operation(struct fscache_operation *op) if (!atomic_dec_and_test(&op->usage)) return; + fscache_set_op_state(op, "Put"); + _debug("PUT OP"); if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) BUG(); @@ -452,9 +463,27 @@ static void fscache_op_execute(struct slow_work *work) _leave(""); } +/* + * describe an operation for slow-work debugging + */ +#ifdef CONFIG_SLOW_WORK_PROC +static void fscache_op_desc(struct slow_work *work, struct seq_file *m) +{ + struct fscache_operation *op = + container_of(work, struct fscache_operation, slow_work); + + seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx", + op->object->debug_id, op->debug_id, + op->name, op->state, op->flags); +} +#endif + const struct slow_work_ops fscache_op_slow_work_ops = { .owner = THIS_MODULE, .get_ref = fscache_op_get_ref, .put_ref = fscache_op_put_ref, .execute = fscache_op_execute, +#ifdef CONFIG_SLOW_WORK_PROC + .desc = fscache_op_desc, +#endif }; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 2568e0eb644f..e8bbc395cef6 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -63,14 +63,19 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p static void fscache_attr_changed_op(struct fscache_operation *op) { struct fscache_object *object = op->object; + int ret; _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); fscache_stat(&fscache_n_attr_changed_calls); - if (fscache_object_is_active(object) && - object->cache->ops->attr_changed(object) < 0) - fscache_abort_object(object); + if (fscache_object_is_active(object)) { + fscache_set_op_state(op, "CallFS"); + ret = object->cache->ops->attr_changed(object); + fscache_set_op_state(op, "Done"); + if (ret < 0) + fscache_abort_object(object); + } _leave(""); } @@ -99,6 +104,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) fscache_operation_init(op, NULL); fscache_operation_init_slow(op, fscache_attr_changed_op); op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); + fscache_set_op_name(op, "Attr"); spin_lock(&cookie->lock); @@ -184,6 +190,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( op->start_time = jiffies; INIT_WORK(&op->op.fast_work, fscache_retrieval_work); INIT_LIST_HEAD(&op->to_do); + fscache_set_op_name(&op->op, "Retr"); return op; } @@ -257,6 +264,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, _leave(" = -ENOMEM"); return -ENOMEM; } + 
fscache_set_op_name(&op->op, "RetrRA1"); spin_lock(&cookie->lock); @@ -369,6 +377,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(mapping, end_io_func, context); if (!op) return -ENOMEM; + fscache_set_op_name(&op->op, "RetrRAN"); spin_lock(&cookie->lock); @@ -461,6 +470,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(page->mapping, NULL, NULL); if (!op) return -ENOMEM; + fscache_set_op_name(&op->op, "RetrAL1"); spin_lock(&cookie->lock); @@ -529,6 +539,8 @@ static void fscache_write_op(struct fscache_operation *_op) _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); + fscache_set_op_state(&op->op, "GetPage"); + spin_lock(&cookie->lock); spin_lock(&object->lock); @@ -559,13 +571,17 @@ static void fscache_write_op(struct fscache_operation *_op) spin_unlock(&cookie->lock); if (page) { + fscache_set_op_state(&op->op, "Store"); ret = object->cache->ops->write_page(op, page); + fscache_set_op_state(&op->op, "EndWrite"); fscache_end_page_write(cookie, page); page_cache_release(page); - if (ret < 0) + if (ret < 0) { + fscache_set_op_state(&op->op, "Abort"); fscache_abort_object(object); - else + } else { fscache_enqueue_operation(&op->op); + } } _leave(""); @@ -634,6 +650,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, fscache_operation_init(&op->op, fscache_release_write_op); fscache_operation_init_slow(&op->op, fscache_write_op); op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); + fscache_set_op_name(&op->op, "Write1"); ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); if (ret < 0) diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 84d3532dd3ea..7a9847ccd192 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -102,6 +102,16 @@ struct fscache_operation { /* operation releaser */ fscache_operation_release_t release; + +#ifdef CONFIG_SLOW_WORK_PROC + const char *name; /* operation name */ + const char *state; /* operation state */ +#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) +#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0) +#else +#define fscache_set_op_name(OP, N) do { } while(0) +#define fscache_set_op_state(OP, S) do { } while(0) +#endif }; extern atomic_t fscache_op_debug_id; @@ -125,6 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, op->debug_id = atomic_inc_return(&fscache_op_debug_id); op->release = release; INIT_LIST_HEAD(&op->pend_link); + fscache_set_op_state(op, "Init"); } /** @@ -337,6 +348,7 @@ struct fscache_object { FSCACHE_OBJECT_RECYCLING, /* retiring object */ FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ FSCACHE_OBJECT_DEAD, /* object is now dead */ + FSCACHE_OBJECT__NSTATES } state; int debug_id; /* debugging ID */ -- cgit v1.2.3 From 4fbf4291aa15926cd4fdca0ffe9122e89d0459db Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:04 +0000 Subject: FS-Cache: Allow the current state of all objects to be dumped Allow the current state of all fscache objects to be dumped by doing: cat /proc/fs/fscache/objects By default, all objects and all fields will be shown. 
This can be restricted by adding a suitable key to one of the caller's
keyrings (such as the session keyring):

	keyctl add user fscache:objlist "<restrictions>" @s

The <restrictions> are:

	K	Show hexdump of object key (don't show if not given)
	A	Show hexdump of object aux data (don't show if not given)

And paired restrictions:

	C	Show objects that have a cookie
	c	Show objects that don't have a cookie
	B	Show objects that are busy
	b	Show objects that aren't busy
	W	Show objects that have pending writes
	w	Show objects that don't have pending writes
	R	Show objects that have outstanding reads
	r	Show objects that don't have outstanding reads
	S	Show objects that have slow work queued
	s	Show objects that don't have slow work queued

If neither side of a restriction pair is given, then both are implied.  For
example:

	keyctl add user fscache:objlist KB @s

shows objects that are busy, and lists their object keys, but does not dump
their auxiliary data.  It also implies "CcWwRrSs", but as 'B' is given, 'b' is
not implied.

Signed-off-by: David Howells
---
 Documentation/filesystems/caching/fscache.txt |   81 +++++
 fs/cachefiles/interface.c                     |    1 +
 fs/cachefiles/rdwr.c                          |    6 +-
 fs/fscache/Kconfig                            |    7 +
 fs/fscache/Makefile                           |    1 +
 fs/fscache/cache.c                            |    1 +
 fs/fscache/cookie.c                           |    2 +
 fs/fscache/internal.h                         |   13 +
 fs/fscache/object-list.c                      |  432 ++++++++++++++++++++++++++
 fs/fscache/object.c                           |    2 +-
 fs/fscache/operation.c                        |    3 +
 fs/fscache/page.c                             |    6 +
 fs/fscache/proc.c                             |   13 +
 include/linux/fscache-cache.h                 |   13 +
 14 files changed, 578 insertions(+), 3 deletions(-)
 create mode 100644 fs/fscache/object-list.c
(limited to 'include')

diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt
index 9e94b9491d89..cac09e11ca30 100644
--- a/Documentation/filesystems/caching/fscache.txt
+++ b/Documentation/filesystems/caching/fscache.txt
@@ -299,6 +299,87 @@ proc files.
 	jiffy range covered, and the SECS field the equivalent number of seconds.
 
+===========
+OBJECT LIST
+===========
+
+If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
+list of all the objects currently allocated and allow them to be viewed
+through:
+
+	/proc/fs/fscache/objects
+
+This will look something like:
+
+	[root@andromeda ~]# head /proc/fs/fscache/objects
+	OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA       OBJECT_KEY, AUX_DATA
+	======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
+	   17e4b        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
+	   1693a        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
+
+where the first set of columns before the '|' describe the object:
+
+	COLUMN	DESCRIPTION
+	=======	===============================================================
+	OBJECT	Object debugging ID (appears as OBJ%x in some debug messages)
+	PARENT	Debugging ID of parent object
+	STAT	Object state
+	CHLDN	Number of child objects of this object
+	OPS	Number of outstanding operations on this object
+	OOP	Number of outstanding child object management operations
+	IPR	Number of operations in progress on this object
+	EX	Number of outstanding exclusive operations
+	READS	Number of outstanding read operations
+	EM	Object's event mask
+	EV	Events raised on this object
+	F	Object flags
+	S	Object slow-work work item flags
+
+and the second set of columns describe the object's cookie, if present:
+
+	COLUMN		DESCRIPTION
+	===============	=======================================================
+	NETFS_COOKIE_DEF	Name of netfs cookie definition
+	TY		Cookie type (IX - index, DT - data, hex - special)
+	FL		Cookie flags
+	NETFS_DATA	Netfs private data stored in the cookie
+	OBJECT_KEY	Object key	} 1 column, with separating comma
+	AUX_DATA	Object aux data	} presence may be configured
+
+The data shown may be filtered by attaching a key to an appropriate keyring
+before viewing the file.  Something like:
+
+	keyctl add user fscache:objlist <restrictions> @s
+
+where <restrictions> are a selection of the following letters:
+
+	K	Show hexdump of object key (don't show if not given)
+	A	Show hexdump of object aux data (don't show if not given)
+
+and the following paired letters:
+
+	C	Show objects that have a cookie
+	c	Show objects that don't have a cookie
+	B	Show objects that are busy
+	b	Show objects that aren't busy
+	W	Show objects that have pending writes
+	w	Show objects that don't have pending writes
+	R	Show objects that have outstanding reads
+	r	Show objects that don't have outstanding reads
+	S	Show objects that have slow work queued
+	s	Show objects that don't have slow work queued
+
+If neither side of a letter pair is given, then both are implied.  For example:
+
+	keyctl add user fscache:objlist KB @s
+
+shows objects that are busy, and lists their object keys, but does not dump
+their auxiliary data.  It also implies "CcWwRrSs", but as 'B' is given, 'b' is
+not implied.
+
+By default all objects and all fields will be shown.
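The OBJECT_KEY and AUX_DATA columns are only populated if the netfs's cookie
definition supplies get_key()/get_aux() operations.  A hedged sketch of such a
definition (myfs_inode and the "myfs.inode" name are invented; the operation
signatures follow struct fscache_cookie_def in linux/fscache.h):

	#include <linux/fscache.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	struct myfs_inode {			/* invented netfs structure */
		u8	fhandle[32];
		u16	fhandle_len;
	};

	static uint16_t myfs_cookie_get_key(const void *cookie_netfs_data,
					    void *buffer, uint16_t bufmax)
	{
		const struct myfs_inode *inode = cookie_netfs_data;
		uint16_t len = min_t(uint16_t, inode->fhandle_len, bufmax);

		/* whatever is copied here is hexdumped in the OBJECT_KEY
		 * column when 'K' is included in the restrictions */
		memcpy(buffer, inode->fhandle, len);
		return len;
	}

	static const struct fscache_cookie_def myfs_cookie_def = {
		.name		= "myfs.inode",
		.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
		.get_key	= myfs_cookie_get_key,
	};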
+ + ========= DEBUGGING ========= diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 431accd475a7..dd7f852746cb 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -331,6 +331,7 @@ static void cachefiles_put_object(struct fscache_object *_object) } cache = object->fscache.cache; + fscache_object_destroy(&object->fscache); kmem_cache_free(cachefiles_object_jar, object); fscache_object_destroyed(cache); } diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index a69787e7dd96..3304646dae84 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -333,7 +333,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; - op->op.flags = FSCACHE_OP_FAST; + op->op.flags &= FSCACHE_OP_KEEP_FLAGS; + op->op.flags |= FSCACHE_OP_FAST; op->op.processor = cachefiles_read_copier; pagevec_init(&pagevec, 0); @@ -639,7 +640,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, pagevec_init(&pagevec, 0); - op->op.flags = FSCACHE_OP_FAST; + op->op.flags &= FSCACHE_OP_KEEP_FLAGS; + op->op.flags |= FSCACHE_OP_FAST; op->op.processor = cachefiles_read_copier; INIT_LIST_HEAD(&backpages); diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig index 9bbb8ce7bea0..864dac20a242 100644 --- a/fs/fscache/Kconfig +++ b/fs/fscache/Kconfig @@ -54,3 +54,10 @@ config FSCACHE_DEBUG enabled by setting bits in /sys/modules/fscache/parameter/debug. See Documentation/filesystems/caching/fscache.txt for more information. + +config FSCACHE_OBJECT_LIST + bool "Maintain global object list for debugging purposes" + depends on FSCACHE && PROC_FS + help + Maintain a global list of active fscache objects that can be + retrieved through /proc/fs/fscache/objects for debugging purposes diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile index 91571b95aacc..6d561531cb36 100644 --- a/fs/fscache/Makefile +++ b/fs/fscache/Makefile @@ -15,5 +15,6 @@ fscache-y := \ fscache-$(CONFIG_PROC_FS) += proc.o fscache-$(CONFIG_FSCACHE_STATS) += stats.o fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o +fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o obj-$(CONFIG_FSCACHE) := fscache.o diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c index e21985bbb1fb..724384ef96de 100644 --- a/fs/fscache/cache.c +++ b/fs/fscache/cache.c @@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache, spin_lock(&cache->object_list_lock); list_add_tail(&ifsdef->cache_link, &cache->object_list); spin_unlock(&cache->object_list_lock); + fscache_objlist_add(ifsdef); /* add the cache's netfs definition index object to the top level index * cookie as a known backing object */ diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 72fd18f6c71f..9b5187328230 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -349,6 +349,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie, object->cookie = cookie; atomic_inc(&cookie->usage); hlist_add_head(&object->cookie_link, &cookie->backing_objects); + + fscache_objlist_add(object); ret = 0; cant_attach_object: diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 1c341304621f..fe02973a9516 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -88,10 +88,23 @@ extern int fscache_wait_bit_interruptible(void *); /* * object.c */ +extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5]; + extern void fscache_withdrawing_object(struct fscache_cache *, struct fscache_object *); extern void fscache_enqueue_object(struct 
fscache_object *);
 
+/*
+ * object-list.c
+ */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+extern const struct file_operations fscache_objlist_fops;
+
+extern void fscache_objlist_add(struct fscache_object *);
+#else
+#define fscache_objlist_add(object) do {} while(0)
+#endif
+
 /*
  * operation.c
  */
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 000000000000..e590242fa41a
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
+/* Global fscache object list maintainer and viewer
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include
+#include
+#include
+#include
+#include "internal.h"
+
+static struct rb_root fscache_object_list;
+static DEFINE_RWLOCK(fscache_object_list_lock);
+
+struct fscache_objlist_data {
+	unsigned long	config;		/* display configuration */
+#define FSCACHE_OBJLIST_CONFIG_KEY	0x00000001	/* show object keys */
+#define FSCACHE_OBJLIST_CONFIG_AUX	0x00000002	/* show object auxdata */
+#define FSCACHE_OBJLIST_CONFIG_COOKIE	0x00000004	/* show objects with cookies */
+#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE	0x00000008	/* show objects without cookies */
+#define FSCACHE_OBJLIST_CONFIG_BUSY	0x00000010	/* show busy objects */
+#define FSCACHE_OBJLIST_CONFIG_IDLE	0x00000020	/* show idle objects */
+#define FSCACHE_OBJLIST_CONFIG_PENDWR	0x00000040	/* show objects with pending writes */
+#define FSCACHE_OBJLIST_CONFIG_NOPENDWR	0x00000080	/* show objects without pending writes */
+#define FSCACHE_OBJLIST_CONFIG_READS	0x00000100	/* show objects with active reads */
+#define FSCACHE_OBJLIST_CONFIG_NOREADS	0x00000200	/* show objects without active reads */
+#define FSCACHE_OBJLIST_CONFIG_EVENTS	0x00000400	/* show objects with events */
+#define FSCACHE_OBJLIST_CONFIG_NOEVENTS	0x00000800	/* show objects without events */
+#define FSCACHE_OBJLIST_CONFIG_WORK	0x00001000	/* show objects with slow work */
+#define FSCACHE_OBJLIST_CONFIG_NOWORK	0x00002000	/* show objects without slow work */
+
+	u8		buf[512];	/* key and aux data buffer */
+};
+
+/*
+ * Add an object to the object list
+ * - we use the address of the fscache_object structure as the key into the
+ *   tree
+ */
+void fscache_objlist_add(struct fscache_object *obj)
+{
+	struct fscache_object *xobj;
+	struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+
+	write_lock(&fscache_object_list_lock);
+
+	while (*p) {
+		parent = *p;
+		xobj = rb_entry(parent, struct fscache_object, objlist_link);
+
+		if (obj < xobj)
+			p = &(*p)->rb_left;
+		else if (obj > xobj)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&obj->objlist_link, parent, p);
+	rb_insert_color(&obj->objlist_link, &fscache_object_list);
+
+	write_unlock(&fscache_object_list_lock);
+}
+
+/**
+ * fscache_object_destroy - Note that a cache object is about to be destroyed
+ * @obj: The object to be destroyed
+ *
+ * Note the imminent destruction and deallocation of a cache object record.
+ */ +void fscache_object_destroy(struct fscache_object *obj) +{ + write_lock(&fscache_object_list_lock); + + BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); + rb_erase(&obj->objlist_link, &fscache_object_list); + + write_unlock(&fscache_object_list_lock); +} +EXPORT_SYMBOL(fscache_object_destroy); + +/* + * find the object in the tree on or after the specified index + */ +static struct fscache_object *fscache_objlist_lookup(loff_t *_pos) +{ + struct fscache_object *pobj, *obj, *minobj = NULL; + struct rb_node *p; + unsigned long pos; + + if (*_pos >= (unsigned long) ERR_PTR(-ENOENT)) + return NULL; + pos = *_pos; + + /* banners (can't represent line 0 by pos 0 as that would involve + * returning a NULL pointer) */ + if (pos == 0) + return (struct fscache_object *) ++(*_pos); + if (pos < 3) + return (struct fscache_object *)pos; + + pobj = (struct fscache_object *)pos; + p = fscache_object_list.rb_node; + while (p) { + obj = rb_entry(p, struct fscache_object, objlist_link); + if (pobj < obj) { + if (!minobj || minobj > obj) + minobj = obj; + p = p->rb_left; + } else if (pobj > obj) { + p = p->rb_right; + } else { + minobj = obj; + break; + } + obj = NULL; + } + + if (!minobj) + *_pos = (unsigned long) ERR_PTR(-ENOENT); + else if (minobj != obj) + *_pos = (unsigned long) minobj; + return minobj; +} + +/* + * set up the iterator to start reading from the first line + */ +static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos) + __acquires(&fscache_object_list_lock) +{ + read_lock(&fscache_object_list_lock); + return fscache_objlist_lookup(_pos); +} + +/* + * move to the next line + */ +static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos) +{ + (*_pos)++; + return fscache_objlist_lookup(_pos); +} + +/* + * clean up after reading + */ +static void fscache_objlist_stop(struct seq_file *m, void *v) + __releases(&fscache_object_list_lock) +{ + read_unlock(&fscache_object_list_lock); +} + +/* + * display an object + */ +static int fscache_objlist_show(struct seq_file *m, void *v) +{ + struct fscache_objlist_data *data = m->private; + struct fscache_object *obj = v; + unsigned long config = data->config; + uint16_t keylen, auxlen; + char _type[3], *type; + bool no_cookie; + u8 *buf = data->buf, *p; + + if ((unsigned long) v == 1) { + seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS" + " EM EV F S" + " | NETFS_COOKIE_DEF TY FL NETFS_DATA"); + if (config & (FSCACHE_OBJLIST_CONFIG_KEY | + FSCACHE_OBJLIST_CONFIG_AUX)) + seq_puts(m, " "); + if (config & FSCACHE_OBJLIST_CONFIG_KEY) + seq_puts(m, "OBJECT_KEY"); + if ((config & (FSCACHE_OBJLIST_CONFIG_KEY | + FSCACHE_OBJLIST_CONFIG_AUX)) == + (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX)) + seq_puts(m, ", "); + if (config & FSCACHE_OBJLIST_CONFIG_AUX) + seq_puts(m, "AUX_DATA"); + seq_puts(m, "\n"); + return 0; + } + + if ((unsigned long) v == 2) { + seq_puts(m, "======== ======== ==== ===== === === === == =====" + " == == = =" + " | ================ == == ================"); + if (config & (FSCACHE_OBJLIST_CONFIG_KEY | + FSCACHE_OBJLIST_CONFIG_AUX)) + seq_puts(m, " ================"); + seq_puts(m, "\n"); + return 0; + } + + /* filter out any unwanted objects */ +#define FILTER(criterion, _yes, _no) \ + do { \ + unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \ + unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \ + if (criterion) { \ + if (!(config & yes)) \ + return 0; \ + } else { \ + if (!(config & no)) \ + return 0; \ + } \ + } while(0) + + if (~config) { + FILTER(obj->cookie, + COOKIE, 
NOCOOKIE);
+		FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
+		       obj->n_ops != 0 ||
+		       obj->n_obj_ops != 0 ||
+		       obj->flags ||
+		       !list_empty(&obj->dependents),
+		       BUSY, IDLE);
+		FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
+		       PENDWR, NOPENDWR);
+		FILTER(atomic_read(&obj->n_reads),
+		       READS, NOREADS);
+		FILTER(obj->events & obj->event_mask,
+		       EVENTS, NOEVENTS);
+		FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
+		       WORK, NOWORK);
+	}
+
+	seq_printf(m,
+		   "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
+		   obj->debug_id,
+		   obj->parent ? obj->parent->debug_id : -1,
+		   fscache_object_states_short[obj->state],
+		   obj->n_children,
+		   obj->n_ops,
+		   obj->n_obj_ops,
+		   obj->n_in_progress,
+		   obj->n_exclusive,
+		   atomic_read(&obj->n_reads),
+		   obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
+		   obj->events,
+		   obj->flags,
+		   obj->work.flags);
+
+	no_cookie = true;
+	keylen = auxlen = 0;
+	if (obj->cookie) {
+		spin_lock(&obj->lock);
+		if (obj->cookie) {
+			switch (obj->cookie->def->type) {
+			case 0:
+				type = "IX";
+				break;
+			case 1:
+				type = "DT";
+				break;
+			default:
+				sprintf(_type, "%02u",
+					obj->cookie->def->type);
+				type = _type;
+				break;
+			}
+
+			seq_printf(m, "%-16s %s %2lx %16p",
+				   obj->cookie->def->name,
+				   type,
+				   obj->cookie->flags,
+				   obj->cookie->netfs_data);
+
+			if (obj->cookie->def->get_key &&
+			    config & FSCACHE_OBJLIST_CONFIG_KEY)
+				keylen = obj->cookie->def->get_key(
+					obj->cookie->netfs_data,
+					buf, 400);
+
+			if (obj->cookie->def->get_aux &&
+			    config & FSCACHE_OBJLIST_CONFIG_AUX)
+				auxlen = obj->cookie->def->get_aux(
+					obj->cookie->netfs_data,
+					buf + keylen, 512 - keylen);
+
+			no_cookie = false;
+		}
+		spin_unlock(&obj->lock);
+
+		if (!no_cookie && (keylen > 0 || auxlen > 0)) {
+			seq_printf(m, " ");
+			for (p = buf; keylen > 0; keylen--)
+				seq_printf(m, "%02x", *p++);
+			if (auxlen > 0) {
+				if (config & FSCACHE_OBJLIST_CONFIG_KEY)
+					seq_printf(m, ", ");
+				for (; auxlen > 0; auxlen--)
+					seq_printf(m, "%02x", *p++);
+			}
+		}
+	}
+
+	if (no_cookie)
+		seq_printf(m, "<no_cookie>\n");
+	else
+		seq_printf(m, "\n");
+	return 0;
+}
+
+static const struct seq_operations fscache_objlist_ops = {
+	.start	= fscache_objlist_start,
+	.stop	= fscache_objlist_stop,
+	.next	= fscache_objlist_next,
+	.show	= fscache_objlist_show,
+};
+
+/*
+ * get the configuration for filtering the list
+ */
+static void fscache_objlist_config(struct fscache_objlist_data *data)
+{
+#ifdef CONFIG_KEYS
+	struct user_key_payload *confkey;
+	unsigned long config;
+	struct key *key;
+	const char *buf;
+	int len;
+
+	key = request_key(&key_type_user, "fscache:objlist", NULL);
+	if (IS_ERR(key))
+		goto no_config;
+
+	config = 0;
+	rcu_read_lock();
+
+	confkey = key->payload.data;
+	buf = confkey->data;
+
+	for (len = confkey->datalen - 1; len >= 0; len--) {
+		switch (buf[len]) {
+		case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY;		break;
+		case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX;		break;
+		case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE;	break;
+		case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE;	break;
+		case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY;	break;
+		case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE;	break;
+		case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR;	break;
+		case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR;	break;
+		case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS;	break;
+		case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS;	break;
+		case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK;	break;
+		case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK;	break;
+		}
+	}
+
+	rcu_read_unlock();
+	key_put(key);
+
+	if
(!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE))) + config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE; + if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE))) + config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE; + if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR))) + config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR; + if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS))) + config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS; + if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS))) + config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS; + if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK))) + config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK; + + data->config = config; + return; + +no_config: +#endif + data->config = ULONG_MAX; +} + +/* + * open "/proc/fs/fscache/objects" to provide a list of active objects + * - can be configured by a user-defined key added to the caller's keyrings + */ +static int fscache_objlist_open(struct inode *inode, struct file *file) +{ + struct fscache_objlist_data *data; + struct seq_file *m; + int ret; + + ret = seq_open(file, &fscache_objlist_ops); + if (ret < 0) + return ret; + + m = file->private_data; + + /* buffer for key extraction */ + data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL); + if (!data) { + seq_release(inode, file); + return -ENOMEM; + } + + /* get the configuration key */ + fscache_objlist_config(data); + + m->private = data; + return 0; +} + +/* + * clean up on close + */ +static int fscache_objlist_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + + kfree(m->private); + m->private = NULL; + return seq_release(inode, file); +} + +const struct file_operations fscache_objlist_fops = { + .owner = THIS_MODULE, + .open = fscache_objlist_open, + .read = seq_read, + .llseek = seq_lseek, + .release = fscache_objlist_release, +}; diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 615b63dd9ecc..ad1644f073bd 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -34,7 +34,7 @@ const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { }; EXPORT_SYMBOL(fscache_object_states); -static const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { +const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { [FSCACHE_OBJECT_INIT] = "INIT", [FSCACHE_OBJECT_LOOKING_UP] = "LOOK", [FSCACHE_OBJECT_CREATING] = "CRTN", diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 91bbe6f0377c..09e43b6e822f 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -322,6 +322,9 @@ void fscache_put_operation(struct fscache_operation *op) object = op->object; + if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) + atomic_dec(&object->n_reads); + /* now... 
we may get called with the object spinlock held, so we * complete the cleanup here only if we can immediately acquire the * lock, and defer it otherwise */ diff --git a/fs/fscache/page.c b/fs/fscache/page.c index e8bbc395cef6..c5973e38ce39 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -275,6 +275,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); + atomic_inc(&object->n_reads); + set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + if (fscache_submit_op(object, &op->op) < 0) goto nobufs_unlock; spin_unlock(&cookie->lock); @@ -386,6 +389,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); + atomic_inc(&object->n_reads); + set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + if (fscache_submit_op(object, &op->op) < 0) goto nobufs_unlock; spin_unlock(&cookie->lock); diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c index beeab44bc31a..1d9e4951a597 100644 --- a/fs/fscache/proc.c +++ b/fs/fscache/proc.c @@ -37,10 +37,20 @@ int __init fscache_proc_init(void) goto error_histogram; #endif +#ifdef CONFIG_FSCACHE_OBJECT_LIST + if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL, + &fscache_objlist_fops)) + goto error_objects; +#endif + _leave(" = 0"); return 0; +#ifdef CONFIG_FSCACHE_OBJECT_LIST +error_objects: +#endif #ifdef CONFIG_FSCACHE_HISTOGRAM + remove_proc_entry("fs/fscache/histogram", NULL); error_histogram: #endif #ifdef CONFIG_FSCACHE_STATS @@ -58,6 +68,9 @@ error_dir: */ void fscache_proc_cleanup(void) { +#ifdef CONFIG_FSCACHE_OBJECT_LIST + remove_proc_entry("fs/fscache/objects", NULL); +#endif #ifdef CONFIG_FSCACHE_HISTOGRAM remove_proc_entry("fs/fscache/histogram", NULL); #endif diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 7a9847ccd192..184cbdfbcc99 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -91,6 +91,8 @@ struct fscache_operation { #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEAD 6 /* op is now dead */ +#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */ +#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */ atomic_t usage; unsigned debug_id; /* debugging ID */ @@ -357,6 +359,7 @@ struct fscache_object { int n_obj_ops; /* number of object ops outstanding on object */ int n_in_progress; /* number of ops in progress */ int n_exclusive; /* number of exclusive ops queued */ + atomic_t n_reads; /* number of read ops in progress */ spinlock_t lock; /* state and operations lock */ unsigned long lookup_jif; /* time at which lookup started */ @@ -370,6 +373,7 @@ struct fscache_object { #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ +#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/ unsigned long flags; #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ @@ -385,6 +389,9 @@ struct fscache_object { struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ +#ifdef CONFIG_FSCACHE_OBJECT_LIST + struct rb_node 
objlist_link; /* link in global object list */ +#endif pgoff_t store_limit; /* current storage limit */ }; @@ -434,6 +441,12 @@ void fscache_object_init(struct fscache_object *object, extern void fscache_object_lookup_negative(struct fscache_object *object); extern void fscache_obtained_object(struct fscache_object *object); +#ifdef CONFIG_FSCACHE_OBJECT_LIST +extern void fscache_object_destroy(struct fscache_object *object); +#else +#define fscache_object_destroy(object) do {} while(0) +#endif + /** * fscache_object_destroyed - Note destruction of an object in a cache * @cache: The cache from which the object came -- cgit v1.2.3 From 1bccf513ac49d44604ba1cddcc29f5886e70f1b6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:25 +0000 Subject: FS-Cache: Fix lock misorder in fscache_write_op() FS-Cache has two structs internally for keeping track of the internal state of a cached file: the fscache_cookie struct, which represents the netfs's state, and fscache_object struct, which represents the cache's state. Each has a pointer that points to the other (when both are in existence), and each has a spinlock for pointer maintenance. Since netfs operations approach these structures from the cookie side, they get the cookie lock first, then the object lock. Cache operations, on the other hand, approach from the object side, and get the object lock first. It is not then permitted for a cache operation to get the cookie lock whilst it is holding the object lock lest deadlock occur; instead, it must do one of two things: (1) increment the cookie usage counter, drop the object lock and then get both locks in order, or (2) simply hold the object lock as certain parts of the cookie may not be altered whilst the object lock is held. It is also not permitted to follow either pointer without holding the lock at the end you start with. To break the pointers between the cookie and the object, both locks must be held. fscache_write_op(), however, violates the locking rules: It attempts to get the cookie lock without (a) checking that the cookie pointer is a valid pointer, and (b) holding the object lock to protect the cookie pointer whilst it follows it. This is so that it can access the pending page store tree without interference from __fscache_write_page(). This is fixed by splitting the cookie lock, such that the page store tracking tree is protected by its own lock, and checking that the cookie pointer is non-NULL before we attempt to follow it whilst holding the object lock. The new lock is subordinate to both the cookie lock and the object lock, and so should be taken after those. Signed-off-by: David Howells --- Documentation/filesystems/caching/fscache.txt | 3 ++ fs/fscache/cookie.c | 1 + fs/fscache/internal.h | 4 +++ fs/fscache/page.c | 52 ++++++++++++++++++--------- fs/fscache/stats.c | 10 ++++-- include/linux/fscache-cache.h | 1 + 6 files changed, 52 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index 0a77868f4977..9cf2cfbc81c9 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt @@ -269,6 +269,9 @@ proc files. 
oom=N Number of store reqs failed -ENOMEM ops=N Number of store reqs submitted run=N Number of store reqs granted CPU time + pgs=N Number of pages given store req processing time + rxd=N Number of store reqs deleted from tracking tree + olm=N Number of store reqs over store limit Ops pend=N Number of times async ops added to pending queues run=N Number of times async ops given CPU time enq=N Number of times async ops queued for processing diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index e6854f5222f5..f979659c1b3f 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie) memset(cookie, 0, sizeof(*cookie)); spin_lock_init(&cookie->lock); + spin_lock_init(&cookie->stores_lock); INIT_HLIST_HEAD(&cookie->backing_objects); } diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 50324ad2b194..ba1853fa1ff9 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -17,6 +17,7 @@ * - cache->object_list_lock * - object->lock * - object->parent->lock + * - cookie->stores_lock * - fscache_thread_lock * */ @@ -174,6 +175,9 @@ extern atomic_t fscache_n_stores_nobufs; extern atomic_t fscache_n_stores_oom; extern atomic_t fscache_n_store_ops; extern atomic_t fscache_n_store_calls; +extern atomic_t fscache_n_store_pages; +extern atomic_t fscache_n_store_radix_deletes; +extern atomic_t fscache_n_store_pages_over_limit; extern atomic_t fscache_n_marks; extern atomic_t fscache_n_uncaches; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index e6f2e61133a1..3ea8897bc217 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -45,16 +45,26 @@ EXPORT_SYMBOL(__fscache_wait_on_page_write); /* * note that a page has finished being written to the cache */ -static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page) +static void fscache_end_page_write(struct fscache_object *object, + struct page *page) { - struct page *xpage; + struct fscache_cookie *cookie; + struct page *xpage = NULL; - spin_lock(&cookie->lock); - xpage = radix_tree_delete(&cookie->stores, page->index); - spin_unlock(&cookie->lock); - ASSERT(xpage != NULL); - - wake_up_bit(&cookie->flags, 0); + spin_lock(&object->lock); + cookie = object->cookie; + if (cookie) { + /* delete the page from the tree if it is now no longer + * pending */ + spin_lock(&cookie->stores_lock); + fscache_stat(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); + wake_up_bit(&cookie->flags, 0); + } + spin_unlock(&object->lock); + if (xpage) + page_cache_release(xpage); } /* @@ -591,7 +601,7 @@ static void fscache_write_op(struct fscache_operation *_op) struct fscache_storage *op = container_of(_op, struct fscache_storage, op); struct fscache_object *object = op->op.object; - struct fscache_cookie *cookie = object->cookie; + struct fscache_cookie *cookie; struct page *page; unsigned n; void *results[1]; @@ -601,16 +611,17 @@ static void fscache_write_op(struct fscache_operation *_op) fscache_set_op_state(&op->op, "GetPage"); - spin_lock(&cookie->lock); spin_lock(&object->lock); + cookie = object->cookie; - if (!fscache_object_is_active(object)) { + if (!fscache_object_is_active(object) || !cookie) { spin_unlock(&object->lock); - spin_unlock(&cookie->lock); _leave(""); return; } + spin_lock(&cookie->stores_lock); + fscache_stat(&fscache_n_store_calls); /* find a page to store */ @@ -621,23 +632,25 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; page 
= results[0]; _debug("gang %d [%lx]", n, page->index); - if (page->index > op->store_limit) + if (page->index > op->store_limit) { + fscache_stat(&fscache_n_store_pages_over_limit); goto superseded; + } radix_tree_tag_clear(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG); + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); - spin_unlock(&cookie->lock); if (page) { fscache_set_op_state(&op->op, "Store"); + fscache_stat(&fscache_n_store_pages); fscache_stat(&fscache_n_cop_write_page); ret = object->cache->ops->write_page(op, page); fscache_stat_d(&fscache_n_cop_write_page); fscache_set_op_state(&op->op, "EndWrite"); - fscache_end_page_write(cookie, page); - page_cache_release(page); + fscache_end_page_write(object, page); if (ret < 0) { fscache_set_op_state(&op->op, "Abort"); fscache_abort_object(object); @@ -653,9 +666,9 @@ superseded: /* this writer is going away and there aren't any more things to * write */ _debug("cease"); + spin_unlock(&cookie->stores_lock); clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); spin_unlock(&object->lock); - spin_unlock(&cookie->lock); _leave(""); } @@ -731,6 +744,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, /* add the page to the pending-storage radix tree on the backing * object */ spin_lock(&object->lock); + spin_lock(&cookie->stores_lock); _debug("store limit %llx", (unsigned long long) object->store_limit); @@ -751,6 +765,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) goto already_pending; + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); @@ -772,6 +787,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, already_queued: fscache_stat(&fscache_n_stores_again); already_pending: + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); spin_unlock(&cookie->lock); radix_tree_preload_end(); @@ -781,7 +797,9 @@ already_pending: return 0; submit_failed: + spin_lock(&cookie->stores_lock); radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); page_cache_release(page); ret = -ENOBUFS; goto nobufs; diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 4c07439d1307..1d53ea68409e 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -58,6 +58,9 @@ atomic_t fscache_n_stores_nobufs; atomic_t fscache_n_stores_oom; atomic_t fscache_n_store_ops; atomic_t fscache_n_store_calls; +atomic_t fscache_n_store_pages; +atomic_t fscache_n_store_radix_deletes; +atomic_t fscache_n_store_pages_over_limit; atomic_t fscache_n_marks; atomic_t fscache_n_uncaches; @@ -200,9 +203,12 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_stores_again), atomic_read(&fscache_n_stores_nobufs), atomic_read(&fscache_n_stores_oom)); - seq_printf(m, "Stores : ops=%u run=%u\n", + seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", atomic_read(&fscache_n_store_ops), - atomic_read(&fscache_n_store_calls)); + atomic_read(&fscache_n_store_calls), + atomic_read(&fscache_n_store_pages), + atomic_read(&fscache_n_store_radix_deletes), + atomic_read(&fscache_n_store_pages_over_limit)); seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u\n", atomic_read(&fscache_n_op_pend), diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 184cbdfbcc99..f3aa4bdafef6 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -310,6 +310,7 @@ struct fscache_cookie { atomic_t 
usage; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ spinlock_t lock; + spinlock_t stores_lock; /* lock on page store tree */ struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ -- cgit v1.2.3 From 201a15428bd54f83eccec8b7c64a04b8f9431204 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:35 +0000 Subject: FS-Cache: Handle pages pending storage that get evicted under OOM conditions Handle netfs pages that the vmscan algorithm wants to evict from the pagecache under OOM conditions, but that are waiting for write to the cache. Under these conditions, vmscan calls the releasepage() function of the netfs, asking if a page can be discarded. The problem is typified by the following trace of a stuck process: kslowd005 D 0000000000000000 0 4253 2 0x00000080 ffff88001b14f370 0000000000000046 ffff880020d0d000 0000000000000007 0000000000000006 0000000000000001 ffff88001b14ffd8 ffff880020d0d2a8 000000000000ddf0 00000000000118c0 00000000000118c0 ffff880020d0d2a8 Call Trace: [] __fscache_wait_on_page_write+0x8b/0xa7 [fscache] [] ? autoremove_wake_function+0x0/0x34 [] ? __fscache_check_page_write+0x63/0x70 [fscache] [] nfs_fscache_release_page+0x4e/0xc4 [nfs] [] nfs_release_page+0x3c/0x41 [nfs] [] try_to_release_page+0x32/0x3b [] shrink_page_list+0x316/0x4ac [] shrink_inactive_list+0x392/0x67c [] ? __mutex_unlock_slowpath+0x100/0x10b [] ? trace_hardirqs_on_caller+0x10c/0x130 [] ? mutex_unlock+0x9/0xb [] shrink_list+0x8d/0x8f [] shrink_zone+0x278/0x33c [] ? ktime_get_ts+0xad/0xba [] try_to_free_pages+0x22e/0x392 [] ? isolate_pages_global+0x0/0x212 [] __alloc_pages_nodemask+0x3dc/0x5cf [] grab_cache_page_write_begin+0x65/0xaa [] ext3_write_begin+0x78/0x1eb [] generic_file_buffered_write+0x109/0x28c [] ? current_fs_time+0x22/0x29 [] __generic_file_aio_write+0x350/0x385 [] ? generic_file_aio_write+0x4a/0xae [] generic_file_aio_write+0x60/0xae [] do_sync_write+0xe3/0x120 [] ? autoremove_wake_function+0x0/0x34 [] ? __dentry_open+0x1a5/0x2b8 [] ? dentry_open+0x82/0x89 [] cachefiles_write_page+0x298/0x335 [cachefiles] [] fscache_write_op+0x178/0x2c2 [fscache] [] fscache_op_execute+0x7a/0xd1 [fscache] [] slow_work_execute+0x18f/0x2d1 [] slow_work_thread+0x1c5/0x308 [] ? autoremove_wake_function+0x0/0x34 [] ? slow_work_thread+0x0/0x308 [] kthread+0x7a/0x82 [] child_rip+0xa/0x20 [] ? restore_args+0x0/0x30 [] ? tg_shares_up+0x171/0x227 [] ? kthread+0x0/0x82 [] ? child_rip+0x0/0x20 In the above backtrace, the following is happening: (1) A page storage operation is being executed by a slow-work thread (fscache_write_op()). (2) FS-Cache farms the operation out to the cache to perform (cachefiles_write_page()). (3) CacheFiles is then calling Ext3 to perform the actual write, using Ext3's standard write (do_sync_write()) under KERNEL_DS directly from the netfs page. (4) However, for Ext3 to perform the write, it must allocate some memory, in particular, it must allocate at least one page cache page into which it can copy the data from the netfs page. (5) Under OOM conditions, the memory allocator can't immediately come up with a page, so it uses vmscan to find something to discard (try_to_free_pages()). (6) vmscan finds a clean netfs page it might be able to discard (possibly the one it's trying to write out). 
(7) The netfs is called to throw the page away (nfs_release_page()) - but it's called with __GFP_WAIT, so the netfs decides to wait for the store to complete (__fscache_wait_on_page_write()). (8) This blocks a slow-work processing thread - possibly against itself. The system ends up stuck because it can't write out any netfs pages to the cache without allocating more memory. To avoid this, we make FS-Cache cancel some writes that aren't in the middle of actually being performed. This means that some data won't make it into the cache this time. To support this, a new FS-Cache function is added fscache_maybe_release_page() that replaces what the netfs releasepage() functions used to do with respect to the cache. The decisions fscache_maybe_release_page() makes are counted and displayed through /proc/fs/fscache/stats on a line labelled "VmScan". There are four counters provided: "nos=N" - pages that weren't pending storage; "gon=N" - pages that were pending storage when we first looked, but weren't by the time we got the object lock; "bsy=N" - pages that we ignored as they were actively being written when we looked; and "can=N" - pages that we cancelled the storage of. What I'd really like to do is alter the behaviour of the cancellation heuristics, depending on how necessary it is to expel pages. If there are plenty of other pages that aren't waiting to be written to the cache that could be ejected first, then it would be nice to hold up on immediate cancellation of cache writes - but I don't see a way of doing that. Signed-off-by: David Howells --- Documentation/filesystems/caching/fscache.txt | 4 ++ Documentation/filesystems/caching/netfs-api.txt | 21 ++++++- fs/9p/cache.c | 14 +---- fs/afs/file.c | 15 +---- fs/fscache/internal.h | 5 ++ fs/fscache/page.c | 79 ++++++++++++++++++++++++- fs/fscache/stats.c | 11 ++++ fs/nfs/fscache.c | 10 +--- include/linux/fscache-cache.h | 1 + include/linux/fscache.h | 27 +++++++++ 10 files changed, 152 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index 057a3c71d524..7097fd29fb3d 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt @@ -272,6 +272,10 @@ proc files. pgs=N Number of pages given store req processing time rxd=N Number of store reqs deleted from tracking tree olm=N Number of store reqs over store limit + VmScan nos=N Number of release reqs against pages with no pending store + gon=N Number of release reqs against pages stored by time lock granted + bsy=N Number of release reqs ignored due to in-progress store + can=N Number of page stores cancelled due to release req Ops pend=N Number of times async ops added to pending queues run=N Number of times async ops given CPU time enq=N Number of times async ops queued for processing diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt index 2666b1ed5e9e..1902c57b72ef 100644 --- a/Documentation/filesystems/caching/netfs-api.txt +++ b/Documentation/filesystems/caching/netfs-api.txt @@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below). 
Furthermore, note that this does not cancel the asynchronous read or write operation started by the read/alloc and write functions, so the page -invalidation and release functions must use: +invalidation functions must use: bool fscache_check_page_write(struct fscache_cookie *cookie, struct page *page); @@ -654,6 +654,25 @@ to see if a page is being written to the cache, and: to wait for it to finish if it is. +When releasepage() is being implemented, a special FS-Cache function exists to +manage the heuristics of coping with vmscan trying to eject pages, which may +conflict with the cache trying to write pages to the cache (which may itself +need to allocate memory): + + bool fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp); + +This takes the netfs cookie, and the page and gfp arguments as supplied to +releasepage(). It will return false if the page cannot be released yet for +some reason and if it returns true, the page has been uncached and can now be +released. + +To make a page available for release, this function may wait for an outstanding +storage request to complete, or it may attempt to cancel the storage request - +in which case the page will not be stored in the cache this time. + + ========================== INDEX AND DATA FILE UPDATE ========================== diff --git a/fs/9p/cache.c b/fs/9p/cache.c index 51c94e26a346..bcc5357a9069 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c @@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) BUG_ON(!vcookie->fscache); - if (PageFsCache(page)) { - if (fscache_check_page_write(vcookie->fscache, page)) { - if (!(gfp & __GFP_WAIT)) - return 0; - fscache_wait_on_page_write(vcookie->fscache, page); - } - - fscache_uncache_page(vcookie->fscache, page); - ClearPageFsCache(page); - } - - return 1; + return fscache_maybe_release_page(vnode->cache, page, gfp); } void __v9fs_fscache_invalidate_page(struct page *page) @@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page) fscache_wait_on_page_write(vcookie->fscache, page); BUG_ON(!PageLocked(page)); fscache_uncache_page(vcookie->fscache, page); - ClearPageFsCache(page); } } diff --git a/fs/afs/file.c b/fs/afs/file.c index 681c2a7b013f..39b301662f22 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset) struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); fscache_wait_on_page_write(vnode->cache, page); fscache_uncache_page(vnode->cache, page); - ClearPageFsCache(page); } #endif @@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) /* deny if page is being written to the cache and the caller hasn't * elected to wait */ #ifdef CONFIG_AFS_FSCACHE - if (PageFsCache(page)) { - if (fscache_check_page_write(vnode->cache, page)) { - if (!(gfp_flags & __GFP_WAIT)) { - _leave(" = F [cache busy]"); - return 0; - } - fscache_wait_on_page_write(vnode->cache, page); - } - - fscache_uncache_page(vnode->cache, page); - ClearPageFsCache(page); + if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { + _leave(" = F [cache busy]"); + return 0; } #endif diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index a0769872b19c..e5046519b153 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -180,6 +180,11 @@ extern atomic_t fscache_n_store_pages; extern atomic_t fscache_n_store_radix_deletes; extern atomic_t fscache_n_store_pages_over_limit; +extern atomic_t 
fscache_n_store_vmscan_not_storing; +extern atomic_t fscache_n_store_vmscan_gone; +extern atomic_t fscache_n_store_vmscan_busy; +extern atomic_t fscache_n_store_vmscan_cancelled; + extern atomic_t fscache_n_marks; extern atomic_t fscache_n_uncaches; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 022a5da8e130..fc76798bd968 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -42,6 +42,75 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa } EXPORT_SYMBOL(__fscache_wait_on_page_write); +/* + * decide whether a page can be released, possibly by cancelling a store to it + * - we're allowed to sleep if __GFP_WAIT is flagged + */ +bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + struct page *xpage; + void *val; + + _enter("%p,%p,%x", cookie, page, gfp); + + rcu_read_lock(); + val = radix_tree_lookup(&cookie->stores, page->index); + if (!val) { + rcu_read_unlock(); + fscache_stat(&fscache_n_store_vmscan_not_storing); + __fscache_uncache_page(cookie, page); + return true; + } + + /* see if the page is actually undergoing storage - if so we can't get + * rid of it till the cache has finished with it */ + if (radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG)) { + rcu_read_unlock(); + goto page_busy; + } + + /* the page is pending storage, so we attempt to cancel the store and + * discard the store request so that the page can be reclaimed */ + spin_lock(&cookie->stores_lock); + rcu_read_unlock(); + + if (radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG)) { + /* the page started to undergo storage whilst we were looking, + * so now we can only wait or return */ + spin_unlock(&cookie->stores_lock); + goto page_busy; + } + + xpage = radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); + + if (xpage) { + fscache_stat(&fscache_n_store_vmscan_cancelled); + fscache_stat(&fscache_n_store_radix_deletes); + ASSERTCMP(xpage, ==, page); + } else { + fscache_stat(&fscache_n_store_vmscan_gone); + } + + wake_up_bit(&cookie->flags, 0); + if (xpage) + page_cache_release(xpage); + __fscache_uncache_page(cookie, page); + return true; + +page_busy: + /* we might want to wait here, but that could deadlock the allocator as + * the slow-work threads writing to the cache may all end up sleeping + * on memory allocation */ + fscache_stat(&fscache_n_store_vmscan_busy); + return false; +} +EXPORT_SYMBOL(__fscache_maybe_release_page); + /* * note that a page has finished being written to the cache */ @@ -57,6 +126,8 @@ static void fscache_end_page_write(struct fscache_object *object, /* delete the page from the tree if it is now no longer * pending */ spin_lock(&cookie->stores_lock); + radix_tree_tag_clear(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); if (!radix_tree_tag_get(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG)) { fscache_stat(&fscache_n_store_radix_deletes); @@ -640,8 +711,12 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; } - radix_tree_tag_clear(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG); + if (page) { + radix_tree_tag_set(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); + radix_tree_tag_clear(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG); + } spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 045ba396dbf2..cda69994e06d 100644 --- a/fs/fscache/stats.c +++ 
b/fs/fscache/stats.c
@@ -63,6 +63,11 @@ atomic_t fscache_n_store_pages;
 atomic_t fscache_n_store_radix_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
+atomic_t fscache_n_store_vmscan_not_storing;
+atomic_t fscache_n_store_vmscan_gone;
+atomic_t fscache_n_store_vmscan_busy;
+atomic_t fscache_n_store_vmscan_cancelled;
+
 atomic_t fscache_n_marks;
 atomic_t fscache_n_uncaches;
@@ -211,6 +216,12 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_store_radix_deletes),
 		   atomic_read(&fscache_n_store_pages_over_limit));
 
+	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+		   atomic_read(&fscache_n_store_vmscan_not_storing),
+		   atomic_read(&fscache_n_store_vmscan_gone),
+		   atomic_read(&fscache_n_store_vmscan_busy),
+		   atomic_read(&fscache_n_store_vmscan_cancelled));
+
 	seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
 		   atomic_read(&fscache_n_op_pend),
 		   atomic_read(&fscache_n_op_run),
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb959..fa588006588d 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 
 	BUG_ON(!cookie);
 
-	if (fscache_check_page_write(cookie, page)) {
-		if (!(gfp & __GFP_WAIT))
-			return 0;
-		fscache_wait_on_page_write(cookie, page);
-	}
-
 	if (PageFsCache(page)) {
 		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
 			 cookie, page, nfsi);
 
-		fscache_uncache_page(cookie, page);
+		if (!fscache_maybe_release_page(cookie, page, gfp))
+			return 0;
+
 		nfs_add_fscache_stats(page->mapping->host,
 				      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
 	}
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index f3aa4bdafef6..4750d5fb419f 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -317,6 +317,7 @@ struct fscache_cookie {
 	void *netfs_data;	/* back pointer to netfs */
 	struct radix_tree_root stores;	/* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG	0	/* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG	1	/* pages tag: writing to cache */
 
 	unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6d8ee466e0a0..595ce49288b7 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
 extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
 extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
 extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
+					 gfp_t);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
 	__fscache_wait_on_page_write(cookie, page);
 }
 
+/**
+ * fscache_maybe_release_page - Consider releasing a page, cancelling a store
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ * @gfp: The gfp flags passed to releasepage()
+ *
+ * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
+ * releasepage() call. A storage request on the page may be cancelled if it is
+ * not currently being processed.
+ * + * The function returns true if the page no longer has a storage request on it, + * and false if a storage request is left in place. If true is returned, the + * page will have been passed to fscache_uncache_page(). If false is returned + * the page cannot be freed yet. + */ +static inline +bool fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie) && PageFsCache(page)) + return __fscache_maybe_release_page(cookie, page, gfp); + return false; +} + #endif /* _LINUX_FSCACHE_H */ -- cgit v1.2.3 From 60d543ca724be155c2b6166e36a00c80b21bd810 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:45 +0000 Subject: FS-Cache: Start processing an object's operations on that object's death Start processing an object's operations when that object moves into the DYING state as the object cannot be destroyed until all its outstanding operations have completed. Furthermore, make sure that read and allocation operations handle being woken up on a dead object. Such events are recorded in the Allocs.abt and Retrvls.abt statistics as viewable through /proc/fs/fscache/stats. The code for waiting for object activation for the read and allocation operations is also extracted into its own function as it is much the same in all cases, differing only in the stats incremented. Signed-off-by: David Howells --- Documentation/filesystems/caching/fscache.txt | 2 + fs/fscache/internal.h | 5 ++ fs/fscache/object.c | 1 + fs/fscache/page.c | 112 +++++++++++++------------- fs/fscache/stats.c | 12 ++- include/linux/fscache-cache.h | 4 + 6 files changed, 75 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index 7097fd29fb3d..3c23411956bb 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt @@ -253,6 +253,7 @@ proc files. int=N Number of alloc reqs aborted -ERESTARTSYS ops=N Number of alloc reqs submitted owt=N Number of alloc reqs waited for CPU time + abt=N Number of alloc reqs aborted due to object death Retrvls n=N Number of retrieval (read) requests seen ok=N Number of successful retr reqs wt=N Number of retr reqs that waited on lookup completion @@ -262,6 +263,7 @@ proc files. 
oom=N Number of retr reqs failed -ENOMEM ops=N Number of retr reqs submitted owt=N Number of retr reqs waited for CPU time + abt=N Number of retr reqs aborted due to object death Stores n=N Number of storage (write) requests seen ok=N Number of successful store reqs agn=N Number of store reqs on a page already pending storage diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 2bf463d26080..5b49a373689b 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -156,6 +156,7 @@ extern atomic_t fscache_n_allocs_ok; extern atomic_t fscache_n_allocs_wait; extern atomic_t fscache_n_allocs_nobufs; extern atomic_t fscache_n_allocs_intr; +extern atomic_t fscache_n_allocs_object_dead; extern atomic_t fscache_n_alloc_ops; extern atomic_t fscache_n_alloc_op_waits; @@ -166,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata; extern atomic_t fscache_n_retrievals_nobufs; extern atomic_t fscache_n_retrievals_intr; extern atomic_t fscache_n_retrievals_nomem; +extern atomic_t fscache_n_retrievals_object_dead; extern atomic_t fscache_n_retrieval_ops; extern atomic_t fscache_n_retrieval_op_waits; @@ -249,9 +251,12 @@ static inline void fscache_stat_d(atomic_t *stat) atomic_dec(stat); } +#define __fscache_stat(stat) (stat) + extern const struct file_operations fscache_stats_fops; #else +#define __fscache_stat(stat) (NULL) #define fscache_stat(stat) do {} while (0) #endif diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 74bc562a2cbc..c85c9f582166 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -201,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object) } spin_unlock(&object->lock); fscache_enqueue_dependents(object); + fscache_start_operations(object); goto terminal_transit; /* handle an abort during initialisation */ diff --git a/fs/fscache/page.c b/fs/fscache/page.c index fc76798bd968..c598ea4c4e7d 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -313,6 +313,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) return 0; } +/* + * wait for an object to become active (or dead) + */ +static int fscache_wait_for_retrieval_activation(struct fscache_object *object, + struct fscache_retrieval *op, + atomic_t *stat_op_waits, + atomic_t *stat_object_dead) +{ + int ret; + + if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags)) + goto check_if_dead; + + _debug(">>> WT"); + fscache_stat(stat_op_waits); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { + ret = fscache_cancel_op(&op->op); + if (ret == 0) + return -ERESTARTSYS; + + /* it's been removed from the pending queue by another party, + * so we should get to run shortly */ + wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } + _debug("<<< GO"); + +check_if_dead: + if (unlikely(fscache_object_is_dead(object))) { + fscache_stat(stat_object_dead); + return -ENOBUFS; + } + return 0; +} + /* * read a page from the cache or allocate a block in which to store it * - we return: @@ -376,25 +413,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, /* we wait for the operation to become active, and then process it * *here*, in this thread, and not in the thread pool */ - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_retrieval_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if 
(ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_retrieval_op_waits), + __fscache_stat(&fscache_n_retrievals_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { @@ -506,25 +530,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, /* we wait for the operation to become active, and then process it * *here*, in this thread, and not in the thread pool */ - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_retrieval_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if (ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_retrieval_op_waits), + __fscache_stat(&fscache_n_retrievals_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { @@ -612,25 +623,12 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, fscache_stat(&fscache_n_alloc_ops); - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_alloc_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if (ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_alloc_op_waits), + __fscache_stat(&fscache_n_allocs_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ fscache_stat(&fscache_n_cop_allocate_page); diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 9e15289eb5c1..05f77caf4a2d 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -39,6 +39,7 @@ atomic_t fscache_n_allocs_ok; atomic_t fscache_n_allocs_wait; atomic_t fscache_n_allocs_nobufs; atomic_t fscache_n_allocs_intr; +atomic_t fscache_n_allocs_object_dead; atomic_t fscache_n_alloc_ops; atomic_t fscache_n_alloc_op_waits; @@ -49,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata; atomic_t fscache_n_retrievals_nobufs; atomic_t fscache_n_retrievals_intr; atomic_t fscache_n_retrievals_nomem; +atomic_t fscache_n_retrievals_object_dead; atomic_t fscache_n_retrieval_ops; atomic_t fscache_n_retrieval_op_waits; @@ -188,9 +190,10 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_allocs_wait), atomic_read(&fscache_n_allocs_nobufs), atomic_read(&fscache_n_allocs_intr)); - seq_printf(m, "Allocs : ops=%u owt=%u\n", + seq_printf(m, "Allocs : ops=%u owt=%u 
abt=%u\n", atomic_read(&fscache_n_alloc_ops), - atomic_read(&fscache_n_alloc_op_waits)); + atomic_read(&fscache_n_alloc_op_waits), + atomic_read(&fscache_n_allocs_object_dead)); seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" " int=%u oom=%u\n", @@ -201,9 +204,10 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_retrievals_nobufs), atomic_read(&fscache_n_retrievals_intr), atomic_read(&fscache_n_retrievals_nomem)); - seq_printf(m, "Retrvls: ops=%u owt=%u\n", + seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", atomic_read(&fscache_n_retrieval_ops), - atomic_read(&fscache_n_retrieval_op_waits)); + atomic_read(&fscache_n_retrieval_op_waits), + atomic_read(&fscache_n_retrievals_object_dead)); seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", atomic_read(&fscache_n_stores), diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 4750d5fb419f..907bb56c5888 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -404,6 +404,10 @@ extern const char *fscache_object_states[]; (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ (obj)->state < FSCACHE_OBJECT_DYING) +#define fscache_object_is_dead(obj) \ + (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_DYING) + extern const struct slow_work_ops fscache_object_slow_work_ops; /** -- cgit v1.2.3 From a17754fb8c28af19cd70dcbec6d5b0773b94e0c1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:52 +0000 Subject: CacheFiles: Don't write a full page if there's only a partial page to cache cachefiles_write_page() writes a full page to the backing file for the last page of the netfs file, even if the netfs file's last page is only a partial page. This causes the EOF on the backing file to be extended beyond the EOF of the netfs, and thus the backing file will be truncated by cachefiles_attr_changed() called from cachefiles_lookup_object(). So we need to limit the write we make to the backing file on that last page such that it doesn't push the EOF too far. Also, if a backing file that has a partial page at the end is expanded, we discard the partial page and refetch it on the basis that we then have a hole in the file with invalid data, and should the power go out... A better way to deal with this could be to record a note that the partial page contains invalid data until the correct data is written into it. This isn't a problem for netfs's that discard the whole backing file if the file size changes (such as NFS). 
Signed-off-by: David Howells --- fs/cachefiles/interface.c | 20 +++++++++++++++++--- fs/cachefiles/rdwr.c | 23 +++++++++++++++++++---- include/linux/fscache-cache.h | 3 +++ 3 files changed, 39 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index dd7f852746cb..8e67abf05985 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -404,12 +404,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object) if (oi_size == ni_size) return 0; - newattrs.ia_size = ni_size; - newattrs.ia_valid = ATTR_SIZE; - cachefiles_begin_secure(cache, &saved_cred); mutex_lock(&object->backer->d_inode->i_mutex); + + /* if there's an extension to a partial page at the end of the backing + * file, we need to discard the partial page so that we pick up new + * data after it */ + if (oi_size & ~PAGE_MASK && ni_size > oi_size) { + _debug("discard tail %llx", oi_size); + newattrs.ia_valid = ATTR_SIZE; + newattrs.ia_size = oi_size & PAGE_MASK; + ret = notify_change(object->backer, &newattrs); + if (ret < 0) + goto truncate_failed; + } + + newattrs.ia_valid = ATTR_SIZE; + newattrs.ia_size = ni_size; ret = notify_change(object->backer, &newattrs); + +truncate_failed: mutex_unlock(&object->backer->d_inode->i_mutex); cachefiles_end_secure(cache, saved_cred); diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 3304646dae84..5a84fd7109ad 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -803,7 +803,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) struct cachefiles_cache *cache; mm_segment_t old_fs; struct file *file; - loff_t pos; + loff_t pos, eof; + size_t len; void *data; int ret; @@ -837,15 +838,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) ret = -EIO; if (file->f_op->write) { pos = (loff_t) page->index << PAGE_SHIFT; + + /* we mustn't write more data than we have, so we have + * to beware of a partial page at EOF */ + eof = object->fscache.store_limit_l; + len = PAGE_SIZE; + if (eof & ~PAGE_MASK) { + ASSERTCMP(pos, <, eof); + if (eof - pos < PAGE_SIZE) { + _debug("cut short %llx to %llx", + pos, eof); + len = eof - pos; + ASSERTCMP(pos + len, ==, eof); + } + } + data = kmap(page); old_fs = get_fs(); set_fs(KERNEL_DS); ret = file->f_op->write( - file, (const void __user *) data, PAGE_SIZE, - &pos); + file, (const void __user *) data, len, &pos); set_fs(old_fs); kunmap(page); - if (ret != PAGE_SIZE) + if (ret != len) ret = -EIO; } fput(file); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 907bb56c5888..5db50002f3b5 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -395,6 +395,7 @@ struct fscache_object { struct rb_node objlist_link; /* link in global object list */ #endif pgoff_t store_limit; /* current storage limit */ + loff_t store_limit_l; /* current storage limit */ }; extern const char *fscache_object_states[]; @@ -439,6 +440,7 @@ void fscache_object_init(struct fscache_object *object, object->events = object->event_mask = 0; object->flags = 0; object->store_limit = 0; + object->store_limit_l = 0; object->cache = cache; object->cookie = cookie; object->parent = NULL; @@ -491,6 +493,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) static inline void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) { + object->store_limit_l = i_size; object->store_limit = i_size >> PAGE_SHIFT; if (i_size & ~PAGE_MASK) 
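		/* round the limit up so a trailing partial page is covered */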
object->store_limit++; -- cgit v1.2.3 From fee096deb4f33897937b974cb2c5168bab7935be Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:12:05 +0000 Subject: CacheFiles: Catch an overly long wait for an old active object Catch an overly long wait for an old, dying active object when we want to replace it with a new one. The probability is that all the slow-work threads are hogged, and the delete can't get a look in. What we do instead is: (1) if there's nothing in the slow work queue, we sleep until either the dying object has finished dying or there is something in the slow work queue behind which we can queue our object. (2) if there is something in the slow work queue, we return ETIMEDOUT to fscache_lookup_object(), which then puts us back on the slow work queue, presumably behind the deletion that we're blocked by. We are then deferred for a while until we work our way back through the queue - without blocking a slow-work thread unnecessarily. A backtrace similar to the following may appear in the log without this patch: INFO: task kslowd004:5711 blocked for more than 120 seconds. "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kslowd004 D 0000000000000000 0 5711 2 0x00000080 ffff88000340bb80 0000000000000046 ffff88002550d000 0000000000000000 ffff88002550d000 0000000000000007 ffff88000340bfd8 ffff88002550d2a8 000000000000ddf0 00000000000118c0 00000000000118c0 ffff88002550d2a8 Call Trace: [] ? trace_hardirqs_on+0xd/0xf [] ? cachefiles_wait_bit+0x0/0xd [cachefiles] [] cachefiles_wait_bit+0x9/0xd [cachefiles] [] __wait_on_bit+0x43/0x76 [] ? ext3_xattr_get+0x1ec/0x270 [] out_of_line_wait_on_bit+0x69/0x74 [] ? cachefiles_wait_bit+0x0/0xd [cachefiles] [] ? wake_bit_function+0x0/0x2e [] cachefiles_mark_object_active+0x203/0x23b [cachefiles] [] cachefiles_walk_to_object+0x558/0x827 [cachefiles] [] cachefiles_lookup_object+0xac/0x12a [cachefiles] [] fscache_lookup_object+0x1c7/0x214 [fscache] [] fscache_object_state_machine+0xa5/0x52d [fscache] [] fscache_object_slow_work_execute+0x5f/0xa0 [fscache] [] slow_work_execute+0x18f/0x2d1 [] slow_work_thread+0x1c5/0x308 [] ? autoremove_wake_function+0x0/0x34 [] ? slow_work_thread+0x0/0x308 [] kthread+0x7a/0x82 [] child_rip+0xa/0x20 [] ? restore_args+0x0/0x30 [] ? kthread+0x0/0x82 [] ? child_rip+0x0/0x20 1 lock held by kslowd004/5711: #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] cachefiles_walk_to_object+0x1b3/0x827 [cachefiles] Signed-off-by: David Howells --- Documentation/filesystems/caching/fscache.txt | 1 + fs/cachefiles/interface.c | 6 +- fs/cachefiles/namei.c | 87 +++++++++++++++++++++------ fs/fscache/internal.h | 1 + fs/fscache/object.c | 10 ++- fs/fscache/stats.c | 4 +- include/linux/fscache-cache.h | 6 +- 7 files changed, 90 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index 3c23411956bb..a91e2e2095b0 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt @@ -235,6 +235,7 @@ proc files. 
neg=N Number of negative lookups made pos=N Number of positive lookups made crt=N Number of objects created by lookup + tmo=N Number of lookups timed out and requeued Updates n=N Number of update cookie requests seen nul=N Number of upd reqs given a NULL parent run=N Number of upd reqs granted CPU time diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 8e67abf05985..9d3c426044ae 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -114,8 +114,9 @@ nomem_lookup_data: /* * attempt to look up the nominated node in this cache + * - return -ETIMEDOUT to be scheduled again */ -static void cachefiles_lookup_object(struct fscache_object *_object) +static int cachefiles_lookup_object(struct fscache_object *_object) { struct cachefiles_lookup_data *lookup_data; struct cachefiles_object *parent, *object; @@ -145,13 +146,14 @@ static void cachefiles_lookup_object(struct fscache_object *_object) object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) cachefiles_attr_changed(&object->fscache); - if (ret < 0) { + if (ret < 0 && ret != -ETIMEDOUT) { printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n", ret); fscache_object_lookup_error(&object->fscache); } _leave(" [%d]", ret); + return ret; } /* diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 00a0cda8f47a..14ac4806e291 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -21,12 +21,6 @@ #include #include "internal.h" -static int cachefiles_wait_bit(void *flags) -{ - schedule(); - return 0; -} - #define CACHEFILES_KEYBUF_SIZE 512 /* @@ -100,8 +94,8 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object, /* * record the fact that an object is now active */ -static void cachefiles_mark_object_active(struct cachefiles_cache *cache, - struct cachefiles_object *object) +static int cachefiles_mark_object_active(struct cachefiles_cache *cache, + struct cachefiles_object *object) { struct cachefiles_object *xobject; struct rb_node **_p, *_parent = NULL; @@ -139,8 +133,8 @@ try_again: rb_insert_color(&object->active_node, &cache->active_nodes); write_unlock(&cache->active_lock); - _leave(""); - return; + _leave(" = 0"); + return 0; /* an old object from a previous incarnation is hogging the slot - we * need to wait for it to be destroyed */ @@ -155,13 +149,64 @@ wait_for_old_object: atomic_inc(&xobject->usage); write_unlock(&cache->active_lock); - _debug(">>> wait"); - wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE, - cachefiles_wait_bit, TASK_UNINTERRUPTIBLE); - _debug("<<< waited"); + if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { + wait_queue_head_t *wq; + + signed long timeout = 60 * HZ; + wait_queue_t wait; + bool requeue; + + /* if the object we're waiting for is queued for processing, + * then just put ourselves on the queue behind it */ + if (slow_work_is_queued(&xobject->fscache.work)) { + _debug("queue OBJ%x behind OBJ%x immediately", + object->fscache.debug_id, + xobject->fscache.debug_id); + goto requeue; + } + + /* otherwise we sleep until either the object we're waiting for + * is done, or the slow-work facility wants the thread back to + * do other work */ + wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); + init_wait(&wait); + requeue = false; + do { + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) + break; + requeue = slow_work_sleep_till_thread_needed( + &object->fscache.work, &timeout); + } while (timeout > 0 && !requeue); + 
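	/* we reach this point because the old object went inactive, because
	 * slow-work asked for the thread back (requeue set), or because the
	 * 60-second budget ran out; the three cases are told apart below */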
finish_wait(wq, &wait); + + if (requeue && + test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { + _debug("queue OBJ%x behind OBJ%x after wait", + object->fscache.debug_id, + xobject->fscache.debug_id); + goto requeue; + } + + if (timeout <= 0) { + printk(KERN_ERR "\n"); + printk(KERN_ERR "CacheFiles: Error: Overlong" + " wait for old active object to go away\n"); + cachefiles_printk_object(object, xobject); + goto requeue; + } + } + + ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)); cache->cache.ops->put_object(&xobject->fscache); goto try_again; + +requeue: + clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); + cache->cache.ops->put_object(&xobject->fscache); + _leave(" = -ETIMEDOUT"); + return -ETIMEDOUT; } /* @@ -466,12 +511,15 @@ lookup_again: } /* note that we're now using this object */ - cachefiles_mark_object_active(cache, object); + ret = cachefiles_mark_object_active(cache, object); mutex_unlock(&dir->d_inode->i_mutex); dput(dir); dir = NULL; + if (ret == -ETIMEDOUT) + goto mark_active_timed_out; + _debug("=== OBTAINED_OBJECT ==="); if (object->new) { @@ -515,6 +563,10 @@ create_error: cachefiles_io_error(cache, "Create/mkdir failed"); goto error; +mark_active_timed_out: + _debug("mark active timed out"); + goto release_dentry; + check_error: _debug("check error %d", ret); write_lock(&cache->active_lock); @@ -522,7 +574,7 @@ check_error: clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); write_unlock(&cache->active_lock); - +release_dentry: dput(object->dentry); object->dentry = NULL; goto error_out; @@ -543,9 +595,6 @@ error: error_out2: dput(dir); error_out: - if (ret == -ENOSPC) - ret = -ENOBUFS; - _leave(" = error %d", -ret); return ret; } diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 5b49a373689b..0ca2566e038c 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -215,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc; extern atomic_t fscache_n_object_lookups; extern atomic_t fscache_n_object_lookups_negative; extern atomic_t fscache_n_object_lookups_positive; +extern atomic_t fscache_n_object_lookups_timed_out; extern atomic_t fscache_n_object_created; extern atomic_t fscache_n_object_avail; extern atomic_t fscache_n_object_dead; diff --git a/fs/fscache/object.c b/fs/fscache/object.c index f3f952cf887e..e513ac599c8e 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -468,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; struct fscache_object *parent; + int ret; _enter(""); @@ -493,12 +494,19 @@ static void fscache_lookup_object(struct fscache_object *object) fscache_stat(&fscache_n_object_lookups); fscache_stat(&fscache_n_cop_lookup_object); - object->cache->ops->lookup_object(object); + ret = object->cache->ops->lookup_object(object); fscache_stat_d(&fscache_n_cop_lookup_object); if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); + if (ret == -ETIMEDOUT) { + /* probably stuck behind another object, so move this one to + * the back of the queue */ + fscache_stat(&fscache_n_object_lookups_timed_out); + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + } + _leave(""); } diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 05f77caf4a2d..46435f3aae68 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -98,6 +98,7 @@ atomic_t fscache_n_object_no_alloc; atomic_t fscache_n_object_lookups; atomic_t 
fscache_n_object_lookups_negative; atomic_t fscache_n_object_lookups_positive; +atomic_t fscache_n_object_lookups_timed_out; atomic_t fscache_n_object_created; atomic_t fscache_n_object_avail; atomic_t fscache_n_object_dead; @@ -160,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_acquires_nobufs), atomic_read(&fscache_n_acquires_oom)); - seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n", + seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", atomic_read(&fscache_n_object_lookups), atomic_read(&fscache_n_object_lookups_negative), atomic_read(&fscache_n_object_lookups_positive), + atomic_read(&fscache_n_object_lookups_timed_out), atomic_read(&fscache_n_object_created)); seq_printf(m, "Updates: n=%u nul=%u run=%u\n", diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 5db50002f3b5..7be0c6fbe880 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -234,8 +234,10 @@ struct fscache_cache_ops { struct fscache_object *(*alloc_object)(struct fscache_cache *cache, struct fscache_cookie *cookie); - /* look up the object for a cookie */ - void (*lookup_object)(struct fscache_object *object); + /* look up the object for a cookie + * - return -ETIMEDOUT to be requeued + */ + int (*lookup_object)(struct fscache_object *object); /* finished looking up */ void (*lookup_complete)(struct fscache_object *object); -- cgit v1.2.3 From 308efab5e231d1510cd35931d87629bf5171caae Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Thu, 19 Nov 2009 13:30:36 +0000 Subject: vt: Fix use of "new" in a struct field As this struct is exposed to user space and the API was added for this release it's a bit of a pain for the C++ world and we still have time to fix it. Rename the fields before we end up with that pain in an actual release. 
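To make the C++ pain concrete, a minimal sketch (the struct name is hypothetical and only the two field names mirror the old ABI; this is an illustration, not kernel code):

	struct vt_event_sketch {
		unsigned int event;
		unsigned int old;	/* accepted by both C and C++ */
		unsigned int new;	/* valid C, but a hard parse error in
					 * C++, where "new" is a keyword */
	};

Renaming the fields to oldev/newev keeps the header includable from C++ programs without changing the structure's layout, since only the identifiers change.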
Signed-off-by: Alan Cox Reported-by: Olivier Goffart Signed-off-by: Linus Torvalds --- drivers/char/vt_ioctl.c | 6 +++--- include/linux/vt.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index ed86d3bf249a..6aa10284104a 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -103,8 +103,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new) ve->event.event = event; /* kernel view is consoles 0..n-1, user space view is console 1..n with 0 meaning current, so we must bias */ - ve->event.old = old + 1; - ve->event.new = new + 1; + ve->event.oldev = old + 1; + ve->event.newev = new + 1; wake = 1; ve->done = 1; } @@ -186,7 +186,7 @@ int vt_waitactive(int n) vt_event_wait(&vw); if (vw.done == 0) return -EINTR; - } while (vw.event.new != n); + } while (vw.event.newev != n); return 0; } diff --git a/include/linux/vt.h b/include/linux/vt.h index 7afca0d72139..7ffa11f06232 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -70,8 +70,8 @@ struct vt_event { #define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ #define VT_EVENT_RESIZE 0x0008 /* Resize display */ #define VT_MAX_EVENT 0x000F - unsigned int old; /* Old console */ - unsigned int new; /* New console (if changing) */ + unsigned int oldev; /* Old console */ + unsigned int newev; /* New console (if changing) */ unsigned int pad[4]; /* Padding for expansion */ }; -- cgit v1.2.3 From 4ced24c8973f79113444d1e00ee8bd9e74fbf43e Mon Sep 17 00:00:00 2001 From: Kevin Wells Date: Thu, 12 Nov 2009 00:23:00 +0100 Subject: i2c: i2c-pnx: Made buf type unsigned to prevent sign extension Made buf type unsigned to prevent sign extension Signed-off-by: Kevin Wells Signed-off-by: Ben Dooks --- include/linux/i2c-pnx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index f13255e06406..9eb07bbc6522 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -21,7 +21,7 @@ struct i2c_pnx_mif { int mode; /* Interface mode */ struct completion complete; /* I/O completion */ struct timer_list timer; /* Timeout */ - char * buf; /* Data buffer */ + u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ }; -- cgit v1.2.3 From 860dc73608a091e0b325218acc2701709d5f221a Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 19 Nov 2009 17:48:29 -0500 Subject: [SCSI] fix async scan add/remove race resulting in an oops Async scanning introduced a very wide window where the SCSI device is up and running but has not yet been added to sysfs. We delay the adding until all scans have completed to retain the same ordering as sync scanning. This delay in visibility causes an oops if a device is removed before we make it visible because the SCSI removal routines have an inbuilt assumption that if a device is in SDEV_RUNNING state, it must be visible (which is not necessarily true in the async scanning case). Fix this by introducing an additional is_visible flag which we can use to condition the tear down so we do the right thing for running but not yet made visible. 
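The fix records how far device setup got and gates teardown on that record. A reduced userspace sketch of the pattern (all names are illustrative, not the kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	struct dev {
		bool is_visible;	/* set only once registration succeeded */
	};

	static int dev_add(struct dev *d, bool fail_early)
	{
		if (fail_early)
			return -1;	/* device never became visible */
		d->is_visible = true;	/* from here on, full teardown applies */
		return 0;
	}

	static void dev_remove(struct dev *d)
	{
		if (d->is_visible)
			puts("unregister from sysfs, then delete");
		else
			puts("nothing was registered; just drop the reference");
	}

	int main(void)
	{
		struct dev d = { false };

		if (dev_add(&d, true) < 0)
			dev_remove(&d);	/* must not assume visibility */
		return 0;
	}

The patch below applies the same idea with sdev->is_visible, letting __scsi_remove_device() handle both fully added devices and ones that were scanned but never shown.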
Reported-by: Alexey Kuznetsov Signed-off-by: James Bottomley --- drivers/scsi/scsi_scan.c | 18 +++---------- drivers/scsi/scsi_sysfs.c | 63 ++++++++++++++++++++-------------------------- include/scsi/scsi_device.h | 1 + 3 files changed, 32 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 0547a7f44d42..47291bcff0d5 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -952,16 +952,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, return SCSI_SCAN_LUN_PRESENT; } -static inline void scsi_destroy_sdev(struct scsi_device *sdev) -{ - scsi_device_set_state(sdev, SDEV_DEL); - if (sdev->host->hostt->slave_destroy) - sdev->host->hostt->slave_destroy(sdev); - transport_destroy_device(&sdev->sdev_gendev); - put_device(&sdev->sdev_dev); - put_device(&sdev->sdev_gendev); -} - #ifdef CONFIG_SCSI_LOGGING /** * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace @@ -1139,7 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, } } } else - scsi_destroy_sdev(sdev); + __scsi_remove_device(sdev); out: return res; } @@ -1500,7 +1490,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, /* * the sdev we used didn't appear in the report luns scan */ - scsi_destroy_sdev(sdev); + __scsi_remove_device(sdev); return ret; } @@ -1710,7 +1700,7 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) shost_for_each_device(sdev, shost) { if (!scsi_host_scan_allowed(shost) || scsi_sysfs_add_sdev(sdev) != 0) - scsi_destroy_sdev(sdev); + __scsi_remove_device(sdev); } } @@ -1943,7 +1933,7 @@ void scsi_free_host_dev(struct scsi_device *sdev) { BUG_ON(sdev->id != sdev->host->this_id); - scsi_destroy_sdev(sdev); + __scsi_remove_device(sdev); } EXPORT_SYMBOL(scsi_free_host_dev); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5c7eb63a19d1..392d8db33905 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -854,82 +854,73 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) transport_configure_device(&starget->dev); error = device_add(&sdev->sdev_gendev); if (error) { - put_device(sdev->sdev_gendev.parent); printk(KERN_INFO "error 1\n"); - return error; + goto out_remove; } error = device_add(&sdev->sdev_dev); if (error) { printk(KERN_INFO "error 2\n"); - goto clean_device; + device_del(&sdev->sdev_gendev); + goto out_remove; } + transport_add_device(&sdev->sdev_gendev); + sdev->is_visible = 1; /* create queue files, which may be writable, depending on the host */ if (sdev->host->hostt->change_queue_depth) error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); else error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); - if (error) { - __scsi_remove_device(sdev); - goto out; - } + if (error) + goto out_remove; + if (sdev->host->hostt->change_queue_type) error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); else error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); - if (error) { - __scsi_remove_device(sdev); - goto out; - } + if (error) + goto out_remove; error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); if (error) + /* we're treating error on bsg register as non-fatal, + * so pretend nothing went wrong */ sdev_printk(KERN_INFO, sdev, "Failed to register bsg queue, errno=%d\n", error); - /* we're treating error on bsg register as non-fatal, so pretend - * nothing went wrong */ - error = 0; - /* add 
additional host specific attributes */ if (sdev->host->hostt->sdev_attrs) { for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { error = device_create_file(&sdev->sdev_gendev, sdev->host->hostt->sdev_attrs[i]); - if (error) { - __scsi_remove_device(sdev); - goto out; - } + if (error) + goto out_remove; } } - transport_add_device(&sdev->sdev_gendev); - out: - return error; - - clean_device: - scsi_device_set_state(sdev, SDEV_CANCEL); - - device_del(&sdev->sdev_gendev); - transport_destroy_device(&sdev->sdev_gendev); - put_device(&sdev->sdev_dev); - put_device(&sdev->sdev_gendev); + return 0; + out_remove: + __scsi_remove_device(sdev); return error; + } void __scsi_remove_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; - if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) - return; + if (sdev->is_visible) { + if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) + return; - bsg_unregister_queue(sdev->request_queue); - device_unregister(&sdev->sdev_dev); - transport_remove_device(dev); - device_del(dev); + bsg_unregister_queue(sdev->request_queue); + device_unregister(&sdev->sdev_dev); + transport_remove_device(dev); + device_del(dev); + } else + put_device(&sdev->sdev_dev); scsi_device_set_state(sdev, SDEV_DEL); if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 9af48cbf0036..f097ae340bc1 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -145,6 +145,7 @@ struct scsi_device { unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ unsigned last_sector_bug:1; /* do not use multisector accesses on SD_LAST_BUGGY_SECTORS */ + unsigned is_visible:1; /* is the device visible in sysfs */ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ struct list_head event_list; /* asserted events */ -- cgit v1.2.3 From 5fdd4baef6195a1f2960e901c8877e2105f832ca Mon Sep 17 00:00:00 2001 From: Andrei Pelinescu-Onciul Date: Sun, 29 Nov 2009 00:14:02 -0800 Subject: sctp: on T3_RTX retransmit all the in-flight chunks When retransmitting due to T3 timeout, retransmit all the in-flight chunks for the corresponding transport/path, including chunks sent less than one RTO ago. This is the correct behaviour according to RFC 4960 section 6.3.3 E3 and "Note: Any DATA chunks that were sent to the address for which the T3-rtx timer expired but did not fit in one MTU (rule E3 above) should be marked for retransmission and sent as soon as cwnd allows (normally, when a SACK arrives)." This fixes problems when more than one path is present and the T3 retransmission of the first chunk that times out stops the T3 timer for the initial active path, leaving all the other in-flight chunks waiting forever or until a new chunk is transmitted on the same path and times out (and this will happen only if the cwnd allows sending new chunks, but since cwnd was dropped to MTU by the timeout => it will wait until the first heartbeat). Example: 10 packets in flight, sent at 0.1 s intervals on the primary path. The primary path is down and the first packet times out. The first packet is retransmitted on another path, the T3 timer for the primary path is stopped and cwnd is set to MTU. All the other 9 in-flight packets will not be retransmitted (unless more new packets are sent on the primary path, which depends on cwnd allowing it, and even in this case the 9 packets will be retransmitted only after a new packet times out, which even in the best case would be more than the RTO).
This commit reverts d0ce92910bc04e107b2f3f2048f07e94f570035d and also removes the now unused transport->last_rto, introduced in b6157d8e03e1e780660a328f7183bcbfa4a93a19. P.S. The problem is not limited to cases where multiple paths are present; it can happen in a single-homed environment. If the application stops sending data, it is possible to end up with a hung association. Signed-off-by: Andrei Pelinescu-Onciul Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 1 - net/sctp/outqueue.c | 10 ---------- net/sctp/sm_sideeffect.c | 1 - net/sctp/transport.c | 5 ++--- 4 files changed, 2 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index cd2e18778f81..0a474568b003 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -893,7 +893,6 @@ struct sctp_transport { */ /* RTO : The current retransmission timeout value. */ unsigned long rto; - unsigned long last_rto; __u32 rtt; /* This is the most recent RTT. */ diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index c9f20e28521b..23e5e97aa617 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q, if ((reason == SCTP_RTXR_FAST_RTX && (chunk->fast_retransmit == SCTP_NEED_FRTX)) || (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { - /* If this chunk was sent less then 1 rto ago, do not - * retransmit this chunk, but give the peer time - * to acknowlege it. Do this only when - * retransmitting due to T3 timeout. - */ - if (reason == SCTP_RTXR_T3_RTX && - time_before(jiffies, chunk->sent_at + - transport->last_rto)) - continue; - /* RFC 2960 6.2.1 Processing a Received SACK * * C) Any time a DATA chunk is marked for diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 8674d4919556..efa516b47e81 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, * that indicates that we have an outstanding HB. */ if (!is_hb || transport->hb_sent) { - transport->last_rto = transport->rto; transport->rto = min((transport->rto * 2), transport->asoc->rto_max); } } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3b141bb32faf..37a1184d789f 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, * given destination transport address, set RTO to the protocol * parameter 'RTO.Initial'. */ - peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial); + peer->rto = msecs_to_jiffies(sctp_rto_initial); peer->rtt = 0; peer->rttvar = 0; peer->srtt = 0; @@ -386,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) tp->rto = tp->asoc->rto_max; tp->rtt = rtt; - tp->last_rto = tp->rto; /* Reset rto_pending so that a new RTT measurement is started when a * new data chunk is sent.
@@ -602,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t) */ t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); t->ssthresh = asoc->peer.i.a_rwnd; - t->last_rto = t->rto = asoc->rto_initial; + t->rto = asoc->rto_initial; t->rtt = 0; t->srtt = 0; t->rttvar = 0; -- cgit v1.2.3 From 827d42c9ac91ddd728e4f4a31fefb906ef2ceff7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 22 Nov 2009 12:28:41 +0100 Subject: mac80211: fix spurious delBA handling Lennert Buytenhek noticed that delBA handling in mac80211 was broken and has remotely triggerable problems, some of which are due to some code shuffling I did that ended up changing the order in which things were done -- this was commit d75636ef9c1af224f1097941879d5a8db7cd04e5 Author: Johannes Berg Date: Tue Feb 10 21:25:53 2009 +0100 mac80211: RX aggregation: clean up stop session and other parts were already present in the original commit d92684e66091c0f0101819619b315b4bb8b5bcc5 Author: Ron Rindjunsky Date: Mon Jan 28 14:07:22 2008 +0200 mac80211: A-MPDU Tx add delBA from recipient support The first problem is that I moved a BUG_ON before various checks -- thereby making it possible to hit. As the comment indicates, the BUG_ON can be removed since the ampdu_action callback must already exist when the state is != IDLE. The second problem isn't easily exploitable but there's a race condition due to unconditionally setting the state to OPERATIONAL when a delBA frame is received, even when no aggregation session was ever initiated. All the drivers accept stopping the session even then, but that opens a race window where crashes could happen before the driver accepts it. Right now, a WARN_ON may happen with non-HT drivers, while the race opens only for HT drivers. For this case, there are two things necessary to fix it: 1) don't process spurious delBA frames, and be more careful about the session state; don't drop the lock 2) HT drivers need to be prepared to handle a session stop even before the session was really started -- this is true for all drivers (that support aggregation) but iwlwifi which can be fixed easily. The other HT drivers (ath9k and ar9170) are behaving properly already. Reported-by: Lennert Buytenhek Cc: stable@kernel.org Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- drivers/net/wireless/iwlwifi/iwl-tx.c | 10 +++++++++- include/net/mac80211.h | 6 ++++++ net/mac80211/agg-tx.c | 15 +++++++-------- net/mac80211/ht.c | 8 +++----- net/mac80211/ieee80211_i.h | 2 ++ 5 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index fb9bcfa6d947..b7e196e3c8d3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -1277,8 +1277,16 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) return -ENXIO; } + if (priv->stations[sta_id].tid[tid].agg.state == + IWL_EMPTYING_HW_QUEUE_ADDBA) { + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid); + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + return 0; + } + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) - IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); + IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); tid_data = &priv->stations[sta_id].tid[tid]; ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c75b960c8ac8..998c30fc8981 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1283,6 +1283,12 @@ enum ieee80211_filter_flags { * * These flags are used with the ampdu_action() callback in * &struct ieee80211_ops to indicate which action is needed. + * + * Note that drivers MUST be able to deal with a TX aggregation + * session being stopped even before they OK'ed starting it by + * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer + * might receive the addBA frame and send a delBA right away! + * * @IEEE80211_AMPDU_RX_START: start Rx aggregation * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation * @IEEE80211_AMPDU_TX_START: start Tx aggregation diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 63224d1ee8dd..89e238b001de 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -123,13 +123,18 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 ieee80211_tx_skb(sdata, skb, 0); } -static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, - enum ieee80211_back_parties initiator) +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator) { struct ieee80211_local *local = sta->local; int ret; u8 *state; +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", + sta->sta.addr, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + state = &sta->ampdu_mlme.tid_state_tx[tid]; if (*state == HT_AGG_STATE_OPERATIONAL) @@ -143,7 +148,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, /* HW shall not deny going back to legacy */ if (WARN_ON(ret)) { - *state = HT_AGG_STATE_OPERATIONAL; /* * We may have pending packets get stuck in this case... * Not bothering with a workaround for now. 
@@ -525,11 +529,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, goto unlock; } -#ifdef CONFIG_MAC80211_HT_DEBUG - printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", - sta->sta.addr, tid); -#endif /* CONFIG_MAC80211_HT_DEBUG */ - ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); unlock: diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 48ef1a282b91..cdc58e61d921 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -141,7 +141,6 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { - struct ieee80211_local *local = sdata->local; u16 tid, params; u16 initiator; @@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, WLAN_BACK_INITIATOR, 0); else { /* WLAN_BACK_RECIPIENT */ spin_lock_bh(&sta->lock); - sta->ampdu_mlme.tid_state_tx[tid] = - HT_AGG_STATE_OPERATIONAL; + if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) + ___ieee80211_stop_tx_ba_session(sta, tid, + WLAN_BACK_RECIPIENT); spin_unlock_bh(&sta->lock); - ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid, - WLAN_BACK_RECIPIENT); } } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a910bf1f092f..10d316e455de 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1091,6 +1091,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator); +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, -- cgit v1.2.3 From 7716977b6ae5a0cdd0afab5c6035c4d0ce53f599 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 30 Nov 2009 13:24:18 +0000 Subject: mfd: Correct WM831X_MAX_ISEL_VALUE There was confusion between the array size and the highest ISEL value possible. Reported-by: Dan Carpenter Signed-off-by: Mark Brown Signed-off-by: Samuel Ortiz --- drivers/mfd/wm831x-core.c | 2 +- include/linux/mfd/wm831x/regulator.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 49b7885c2702..7f27576ca046 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -29,7 +29,7 @@ /* Current settings - values are 2*2^(reg_val/4) microamps. These are * exported since they are used by multiple drivers. */ -int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL] = { +int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = { 2, 2, 3, diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h index f95466343fb2..955d30fc6a27 100644 --- a/include/linux/mfd/wm831x/regulator.h +++ b/include/linux/mfd/wm831x/regulator.h @@ -1212,7 +1212,7 @@ #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ -#define WM831X_ISINK_MAX_ISEL 56 -extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; +#define WM831X_ISINK_MAX_ISEL 55 +extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; #endif -- cgit v1.2.3 From f13a48bd798a159291ca583b95453171b88b7448 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 1 Dec 2009 15:36:11 +0000 Subject: SLOW_WORK: Move slow_work's proc file to debugfs Move slow_work's debugging proc file to debugfs. 
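The debugfs API used for the move takes a parent dentry rather than a path string, so the conversion is mostly mechanical. A condensed sketch of the registration pattern (the wrapper function name is invented for the sketch; the patch does this inline in init_slow_work(), with the same calls):

	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	extern const struct file_operations slow_work_runqueue_fops;

	static void slow_work_debugfs_register(void)
	{
		struct dentry *dbdir;

		/* creates /sys/kernel/debug/slow_work/runqueue */
		dbdir = debugfs_create_dir("slow_work", NULL);
		if (dbdir && !IS_ERR(dbdir))
			debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
					    NULL, &slow_work_runqueue_fops);
	}

Unlike the old proc_create() call, nothing here depends on CONFIG_PROC_FS, which is why the Kconfig dependency below changes to DEBUG_FS.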
Signed-off-by: David Howells Requested-and-acked-by: Ingo Molnar Signed-off-by: Linus Torvalds --- Documentation/slow-work.txt | 4 +- include/linux/slow-work.h | 8 +- init/Kconfig | 8 +- kernel/Makefile | 2 +- kernel/slow-work-debugfs.c | 227 ++++++++++++++++++++++++++++++++++++++++++++ kernel/slow-work-proc.c | 227 -------------------------------------------- kernel/slow-work.c | 18 ++-- kernel/slow-work.h | 6 +- 8 files changed, 253 insertions(+), 247 deletions(-) create mode 100644 kernel/slow-work-debugfs.c delete mode 100644 kernel/slow-work-proc.c (limited to 'include') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 52bc31433723..9dbf4470c7e1 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -279,9 +279,9 @@ The slow-work thread pool has a number of configurables: VIEWING EXECUTING AND QUEUED ITEMS ================================== -If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available: +If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available: - /proc/slow_work_rq + /sys/kernel/debug/slow_work/runqueue through which the list of work items being executed and the queues of items to be executed may be viewed. The owner of a work item is given the chance to diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index 5035a2691739..13337bf6c3f5 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -20,7 +20,7 @@ #include struct slow_work; -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct seq_file; #endif @@ -42,8 +42,8 @@ struct slow_work_ops { /* execute a work item */ void (*execute)(struct slow_work *work); -#ifdef CONFIG_SLOW_WORK_PROC - /* describe a work item for /proc */ +#ifdef CONFIG_SLOW_WORK_DEBUG + /* describe a work item for debugfs */ void (*desc)(struct slow_work *work, struct seq_file *m); #endif }; @@ -64,7 +64,7 @@ struct slow_work { #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct timespec mark; /* jiffies at which queued or exec begun */ #endif }; diff --git a/init/Kconfig b/init/Kconfig index ab5c64801fe5..39923ccc287b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1098,12 +1098,12 @@ config SLOW_WORK See Documentation/slow-work.txt. -config SLOW_WORK_PROC - bool "Slow work debugging through /proc" +config SLOW_WORK_DEBUG + bool "Slow work debugging through debugfs" default n - depends on SLOW_WORK && PROC_FS + depends on SLOW_WORK && DEBUG_FS help - Display the contents of the slow work run queue through /proc, + Display the contents of the slow work run queue through debugfs, including items currently executing. See Documentation/slow-work.txt. 
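The new kernel/slow-work-debugfs.c below is built on the seq_file iterator protocol. As a reminder of the calling convention it must honour, here is a hypothetical minimal iterator over a static array (not slow-work code; start/stop are also where a real implementation takes and drops its lock, as the slow-work version does with slow_work_queue_lock):

	#include <linux/seq_file.h>

	static int ex_values[] = { 1, 2, 3 };

	static void *ex_start(struct seq_file *m, loff_t *pos)
	{
		/* take any lock protecting the data here */
		return *pos < 3 ? &ex_values[*pos] : NULL; /* NULL ends the walk */
	}

	static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
	{
		(*pos)++;
		return *pos < 3 ? &ex_values[*pos] : NULL;
	}

	static void ex_stop(struct seq_file *m, void *v)
	{
		/* drop the lock taken in ex_start() */
	}

	static int ex_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", *(int *)v);
		return 0;
	}

	static const struct seq_operations ex_seq_ops = {
		.start	= ex_start,
		.next	= ex_next,
		.stop	= ex_stop,
		.show	= ex_show,
	};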
diff --git a/kernel/Makefile b/kernel/Makefile index 776ffed1556d..d7c13d249b2d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o -obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o +obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c new file mode 100644 index 000000000000..e45c43645298 --- /dev/null +++ b/kernel/slow-work-debugfs.c @@ -0,0 +1,227 @@ +/* Slow work debugging + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include "slow-work.h" + +#define ITERATOR_SHIFT (BITS_PER_LONG - 4) +#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) +#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) + +void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) +{ + seq_puts(m, "Slow-work: New thread"); +} + +/* + * Render the time mark field on a work item into a 5-char time with units plus + * a space + */ +static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) +{ + struct timespec now, diff; + + now = CURRENT_TIME; + diff = timespec_sub(now, work->mark); + + if (diff.tv_sec < 0) + seq_puts(m, " -ve "); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) + seq_printf(m, "%3luns ", diff.tv_nsec); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) + seq_printf(m, "%3luus ", diff.tv_nsec / 1000); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) + seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); + else if (diff.tv_sec <= 1) + seq_puts(m, " 1s "); + else if (diff.tv_sec < 60) + seq_printf(m, "%4lus ", diff.tv_sec); + else if (diff.tv_sec < 60 * 60) + seq_printf(m, "%4lum ", diff.tv_sec / 60); + else if (diff.tv_sec < 60 * 60 * 24) + seq_printf(m, "%4luh ", diff.tv_sec / 3600); + else + seq_puts(m, "exces "); +} + +/* + * Describe a slow work item for debugfs + */ +static int slow_work_runqueue_show(struct seq_file *m, void *v) +{ + struct slow_work *work; + struct list_head *p = v; + unsigned long id; + + switch ((unsigned long) v) { + case 1: + seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); + return 0; + case 2: + seq_puts(m, "=== ===== ================ == ===== ==========\n"); + return 0; + + case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: + id = (unsigned long) v - 3; + + read_lock(&slow_work_execs_lock); + work = slow_work_execs[id]; + if (work) { + smp_read_barrier_depends(); + + seq_printf(m, "%3lu %5d %16p %2lx ", + id, slow_work_pids[id], work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + } + read_unlock(&slow_work_execs_lock); + return 0; + + default: + work = list_entry(p, struct slow_work, link); + seq_printf(m, "%3s - %16p %2lx ", + work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", + work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + return 0; + } +} + +/* + * map the iterator to a work item + */ +static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) +{ + struct list_head *p; + unsigned long count, id; + + switch (*_pos >> ITERATOR_SHIFT) { + case 0x0: + if (*_pos == 0) + *_pos = 1; + if (*_pos < 3) + return (void *)(unsigned long) *_pos; + if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) + for (id = *_pos - 3; + id < SLOW_WORK_THREAD_LIMIT; + id++, (*_pos)++) + if (slow_work_execs[id]) + return (void *)(unsigned long) *_pos; + *_pos = 0x1UL << ITERATOR_SHIFT; + + case 0x1: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &slow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + + case 0x2: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &vslow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * set up the iterator to start reading from the first line + */ +static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) +{ + spin_lock_irq(&slow_work_queue_lock); + return slow_work_runqueue_index(m, _pos); +} + +/* + * move to the next line + */ +static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) +{ + struct list_head *p = v; + unsigned long selector = *_pos >> ITERATOR_SHIFT; + + (*_pos)++; + switch (selector) { + case 0x0: + return slow_work_runqueue_index(m, _pos); + + case 0x1: + if (*_pos >> ITERATOR_SHIFT == 0x1) { + p = p->next; + if (p != &slow_work_queue) + return p; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + p = &vslow_work_queue; + + case 0x2: + if (*_pos >> ITERATOR_SHIFT == 0x2) { + p = p->next; + if (p != &vslow_work_queue) + return p; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * clean up after reading + */ +static void slow_work_runqueue_stop(struct seq_file *m, void *v) +{ + spin_unlock_irq(&slow_work_queue_lock); +} + +static const struct seq_operations slow_work_runqueue_ops = { + .start = slow_work_runqueue_start, + .stop = slow_work_runqueue_stop, + .next = slow_work_runqueue_next, + .show = slow_work_runqueue_show, +}; + +/* + * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents + */ +static int slow_work_runqueue_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &slow_work_runqueue_ops); +} + +const struct file_operations slow_work_runqueue_fops = { + .owner = THIS_MODULE, + .open = slow_work_runqueue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c deleted file mode 100644 index 3988032571f5..000000000000 --- a/kernel/slow-work-proc.c +++ /dev/null @@ -1,227 +0,0 @@ -/* Slow work debugging - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include "slow-work.h" - -#define ITERATOR_SHIFT (BITS_PER_LONG - 4) -#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) -#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) - -void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) -{ - seq_puts(m, "Slow-work: New thread"); -} - -/* - * Render the time mark field on a work item into a 5-char time with units plus - * a space - */ -static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) -{ - struct timespec now, diff; - - now = CURRENT_TIME; - diff = timespec_sub(now, work->mark); - - if (diff.tv_sec < 0) - seq_puts(m, " -ve "); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) - seq_printf(m, "%3luns ", diff.tv_nsec); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) - seq_printf(m, "%3luus ", diff.tv_nsec / 1000); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) - seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); - else if (diff.tv_sec <= 1) - seq_puts(m, " 1s "); - else if (diff.tv_sec < 60) - seq_printf(m, "%4lus ", diff.tv_sec); - else if (diff.tv_sec < 60 * 60) - seq_printf(m, "%4lum ", diff.tv_sec / 60); - else if (diff.tv_sec < 60 * 60 * 24) - seq_printf(m, "%4luh ", diff.tv_sec / 3600); - else - seq_puts(m, "exces "); -} - -/* - * Describe a slow work item for /proc - */ -static int slow_work_runqueue_show(struct seq_file *m, void *v) -{ - struct slow_work *work; - struct list_head *p = v; - unsigned long id; - - switch ((unsigned long) v) { - case 1: - seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); - return 0; - case 2: - seq_puts(m, "=== ===== ================ == ===== ==========\n"); - return 0; - - case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: - id = (unsigned long) v - 3; - - read_lock(&slow_work_execs_lock); - work = slow_work_execs[id]; - if (work) { - smp_read_barrier_depends(); - - seq_printf(m, "%3lu %5d %16p %2lx ", - id, slow_work_pids[id], work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - } - read_unlock(&slow_work_execs_lock); - return 0; - - default: - work = list_entry(p, struct slow_work, link); - seq_printf(m, "%3s - %16p %2lx ", - work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", - work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - return 0; - } -} - -/* - * map the iterator to a work item - */ -static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) -{ - struct list_head *p; - unsigned long count, id; - - switch (*_pos >> ITERATOR_SHIFT) { - case 0x0: - if (*_pos == 0) - *_pos = 1; - if (*_pos < 3) - return (void *)(unsigned long) *_pos; - if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) - for (id = *_pos - 3; - id < SLOW_WORK_THREAD_LIMIT; - id++, (*_pos)++) - if (slow_work_execs[id]) - return (void *)(unsigned long) *_pos; - *_pos = 0x1UL << ITERATOR_SHIFT; - - case 0x1: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &slow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - - case 0x2: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &vslow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * set up the iterator to start reading from the first line - */ -static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) -{ - spin_lock_irq(&slow_work_queue_lock); - return slow_work_runqueue_index(m, _pos); -} - -/* - * move to the next line - */ -static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) -{ - struct list_head *p = v; - unsigned long selector = *_pos >> ITERATOR_SHIFT; - - (*_pos)++; - switch (selector) { - case 0x0: - return slow_work_runqueue_index(m, _pos); - - case 0x1: - if (*_pos >> ITERATOR_SHIFT == 0x1) { - p = p->next; - if (p != &slow_work_queue) - return p; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - p = &vslow_work_queue; - - case 0x2: - if (*_pos >> ITERATOR_SHIFT == 0x2) { - p = p->next; - if (p != &vslow_work_queue) - return p; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * clean up after reading - */ -static void slow_work_runqueue_stop(struct seq_file *m, void *v) -{ - spin_unlock_irq(&slow_work_queue_lock); -} - -static const struct seq_operations slow_work_runqueue_ops = { - .start = slow_work_runqueue_start, - .stop = slow_work_runqueue_stop, - .next = slow_work_runqueue_next, - .show = slow_work_runqueue_show, -}; - -/* - * open "/proc/slow_work_rq" to list queue contents - */ -static int slow_work_runqueue_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slow_work_runqueue_ops); -} - -const struct file_operations slow_work_runqueue_fops = { - .owner = THIS_MODULE, - .open = slow_work_runqueue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index b5c17f15f9de..00889bd3c590 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "slow-work.h" static void slow_work_cull_timeout(unsigned long); @@ -138,7 +138,7 @@ static void slow_work_clear_thread_processing(int id) {} /* * Data for tracking currently executing items for indication through /proc */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; DEFINE_RWLOCK(slow_work_execs_lock); @@ -823,7 +823,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) static const struct slow_work_ops slow_work_new_thread_ops = { .owner = THIS_MODULE, .execute = 
slow_work_new_thread_execute, -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG .desc = slow_work_new_thread_desc, #endif }; @@ -1055,9 +1055,15 @@ static int __init init_slow_work(void) if (slow_work_max_max_threads < nr_cpus * 2) slow_work_max_max_threads = nr_cpus * 2; #endif -#ifdef CONFIG_SLOW_WORK_PROC - proc_create("slow_work_rq", S_IFREG | 0400, NULL, - &slow_work_runqueue_fops); +#ifdef CONFIG_SLOW_WORK_DEBUG + { + struct dentry *dbdir; + + dbdir = debugfs_create_dir("slow_work", NULL); + if (dbdir && !IS_ERR(dbdir)) + debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, + NULL, &slow_work_runqueue_fops); + } #endif return 0; } diff --git a/kernel/slow-work.h b/kernel/slow-work.h index 3c2f007f3ad6..321f3c59d732 100644 --- a/kernel/slow-work.h +++ b/kernel/slow-work.h @@ -19,7 +19,7 @@ /* * slow-work.c */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG extern struct slow_work *slow_work_execs[]; extern pid_t slow_work_pids[]; extern rwlock_t slow_work_execs_lock; @@ -30,9 +30,9 @@ extern struct list_head vslow_work_queue; extern spinlock_t slow_work_queue_lock; /* - * slow-work-proc.c + * slow-work-debugfs.c */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG extern const struct file_operations slow_work_runqueue_fops; extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); -- cgit v1.2.3
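A closing note on the least obvious part of the file added above: the seq_file position is multiplexed, with the top four bits selecting a region (0x0 for the header lines and executing threads, 0x1 for the normal queue, 0x2 for the very-slow queue) and the low bits counting within that region. The split can be checked with a small userspace program (illustrative only; BITS_PER_LONG is spelled out by hand):

	#include <stdio.h>

	#define BITS_PER_LONG	  (sizeof(unsigned long) * 8)
	#define ITERATOR_SHIFT	  (BITS_PER_LONG - 4)
	#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
	#define ITERATOR_COUNTER  (~ITERATOR_SELECTOR)

	int main(void)
	{
		/* position of the sixth item in the very-slow queue */
		unsigned long pos = (0x2UL << ITERATOR_SHIFT) | 5;

		printf("selector=%#lx counter=%lu\n",
		       pos >> ITERATOR_SHIFT, pos & ITERATOR_COUNTER);
		return 0;	/* prints: selector=0x2 counter=5 */
	}

This encoding is also why slow_work_runqueue_next() advances *_pos and falls through the switch cases in order: exhausting one region rolls the selector forward into the next.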