author		Ingo Molnar <mingo@elte.hu>	2009-04-14 13:32:23 +0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-14 13:32:30 +0400
commit		05cfbd66d07c44865983c8b65ae9d0037d874206 (patch)
tree		084b665cc97b47d1592fe76ea0a39a7753288a02 /net/core/sock.c
parent		31c9a24ec82926fcae49483e53566d231e705057 (diff)
parent		ef631b0ca01655d24e9ca7e199262c4a46416a26 (diff)
download	linux-05cfbd66d07c44865983c8b65ae9d0037d874206.tar.xz
Merge branch 'core/urgent' into core/rcu
Merge reason: new patches to be queued up depend on:

  ef631b0: rcu: Make hierarchical RCU less IPI-happy

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--	net/core/sock.c	8
1 file changed, 5 insertions, 3 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 0620046e4eba..7dbf3ffb35cc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,7 +1677,7 @@ static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1686,7 +1686,8 @@ static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_sync(sk->sk_sleep);
+		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1700,7 +1701,8 @@ static void sock_def_write_space(struct sock *sk)
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible_sync(sk->sk_sleep);
+			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+						POLLWRNORM | POLLWRBAND);
 		/* Should agree with poll, otherwise some programs break */
 		if (sock_writeable(sk))
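
The hunks above replace the plain wake_up_interruptible*() calls with their *_poll() variants, which pass a poll event mask (POLLERR, POLLIN | POLLRDNORM | POLLRDBAND, POLLOUT | POLLWRNORM | POLLWRBAND) as the wakeup key. Key-aware waiters, such as epoll's wakeup callback, can compare that key against the events they registered for and skip the wakeup when there is no overlap, so e.g. a write-only waiter is no longer woken just because data arrived. The snippet below is a minimal, self-contained userspace sketch of that keyed-wakeup idea, not kernel code; struct waiter and wake_up_key() are illustrative names, not kernel APIs.

#include <poll.h>
#include <stdio.h>

/* Illustrative waiter: in the kernel this role is played by a wait queue
 * entry whose wake function (e.g. epoll's callback) receives the key. */
struct waiter {
	const char   *name;
	unsigned int  interest;	/* poll events this waiter registered for */
};

/* Wake only waiters whose interest mask overlaps the event key, which is
 * the effect of passing a mask to the *_poll() wakeup variants above. */
static void wake_up_key(struct waiter *w, int n, unsigned int key)
{
	for (int i = 0; i < n; i++) {
		if (w[i].interest & key)
			printf("wake  %s (key 0x%x)\n", w[i].name, key);
		else
			printf("skip  %s (no overlap with 0x%x)\n",
			       w[i].name, key);
	}
}

int main(void)
{
	struct waiter waiters[] = {
		{ "reader",      POLLIN  },
		{ "writer",      POLLOUT },
		{ "err-watcher", POLLERR },
	};

	/* Mirrors sock_def_readable(): only read-interested waiters wake. */
	wake_up_key(waiters, 3, POLLIN);
	return 0;
}

With a plain wake_up_interruptible(), all three waiters in this model would be woken on every event; passing the key lets the writer and error watcher sleep through an incoming-data wakeup, which is the extra-wakeup saving this patch is after.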