Diffstat (limited to 'net/tipc/socket.c')
-rw-r--r-- | net/tipc/socket.c | 141 |
1 files changed, 68 insertions, 73 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22909036b9bc..1b5fb6103ded 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,7 @@
 #include <linux/slab.h>
 #include <linux/poll.h>
 #include <linux/fcntl.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include <asm/string.h>
 #include <asm/atomic.h>
 #include <net/sock.h>
@@ -63,7 +63,7 @@
 struct tipc_sock {
         struct sock sk;
         struct tipc_port *p;
-        struct semaphore sem;
+        struct mutex lock;
 };
 
 #define tipc_sk(sk) ((struct tipc_sock*)sk)
@@ -102,44 +102,6 @@ static void sock_unlock(struct tipc_sock* tsock)
 }
 
 /**
- * pollmask - determine the current set of poll() events for a socket
- * @sock: socket structure
- *
- * TIPC sets the returned events as follows:
- * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
- *    or if a connection-oriented socket is does not have an active connection
- *    (i.e. a read operation will not block).
- * b) POLLOUT is set except when a socket's connection has been terminated
- *    (i.e. a write operation will not block).
- * c) POLLHUP is set when a socket's connection has been terminated.
- *
- * IMPORTANT: The fact that a read or write operation will not block does NOT
- * imply that the operation will succeed!
- *
- * Returns pollmask value
- */
-
-static u32 pollmask(struct socket *sock)
-{
-        u32 mask;
-
-        if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
-            (sock->state == SS_UNCONNECTED) ||
-            (sock->state == SS_DISCONNECTING))
-                mask = (POLLRDNORM | POLLIN);
-        else
-                mask = 0;
-
-        if (sock->state == SS_DISCONNECTING)
-                mask |= POLLHUP;
-        else
-                mask |= POLLOUT;
-
-        return mask;
-}
-
-
-/**
  * advance_queue - discard first buffer in queue
  * @tsock: TIPC socket
  */
@@ -217,7 +179,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
         tsock->p = port;
         port->usr_handle = tsock;
 
-        init_MUTEX(&tsock->sem);
+        mutex_init(&tsock->lock);
 
         dbg("sock_create: %x\n",tsock);
 
@@ -253,9 +215,9 @@ static int release(struct socket *sock)
         dbg("sock_delete: %x\n",tsock);
         if (!tsock)
                 return 0;
-        down(&tsock->sem);
+        mutex_lock(&tsock->lock);
         if (!sock->sk) {
-                up(&tsock->sem);
+                mutex_unlock(&tsock->lock);
                 return 0;
         }
 
@@ -288,7 +250,7 @@ static int release(struct socket *sock)
                 atomic_dec(&tipc_queue_size);
         }
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
 
         sock_put(sk);
 
@@ -315,7 +277,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
         struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
         int res;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         if (unlikely(!uaddr_len)) {
@@ -346,7 +308,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
                 res = tipc_withdraw(tsock->p->ref, -addr->scope,
                                     &addr->addr.nameseq);
 exit:
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -367,7 +329,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
         struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
         u32 res;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         *uaddr_len = sizeof(*addr);
@@ -380,7 +342,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
                 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
         addr->addr.name.domain = 0;
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -390,15 +352,47 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
  * @sock: socket for which to calculate the poll bits
  * @wait: ???
  *
- * Returns the pollmask
+ * Returns pollmask value
+ *
+ * COMMENTARY:
+ * It appears that the usual socket locking mechanisms are not useful here
+ * since the pollmask info is potentially out-of-date the moment this routine
+ * exits.  TCP and other protocols seem to rely on higher level poll routines
+ * to handle any preventable race conditions, so TIPC will do the same ...
+ *
+ * TIPC sets the returned events as follows:
+ * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
+ *    or if a connection-oriented socket is does not have an active connection
+ *    (i.e. a read operation will not block).
+ * b) POLLOUT is set except when a socket's connection has been terminated
+ *    (i.e. a write operation will not block).
+ * c) POLLHUP is set when a socket's connection has been terminated.
+ *
+ * IMPORTANT: The fact that a read or write operation will not block does NOT
+ * imply that the operation will succeed!
  */
 
 static unsigned int poll(struct file *file, struct socket *sock,
                          poll_table *wait)
 {
-        poll_wait(file, sock->sk->sk_sleep, wait);
-        /* NEED LOCK HERE? */
-        return pollmask(sock);
+        struct sock *sk = sock->sk;
+        u32 mask;
+
+        poll_wait(file, sk->sk_sleep, wait);
+
+        if (!skb_queue_empty(&sk->sk_receive_queue) ||
+            (sock->state == SS_UNCONNECTED) ||
+            (sock->state == SS_DISCONNECTING))
+                mask = (POLLRDNORM | POLLIN);
+        else
+                mask = 0;
+
+        if (sock->state == SS_DISCONNECTING)
+                mask |= POLLHUP;
+        else
+                mask |= POLLOUT;
+
+        return mask;
 }
 
 /**
@@ -477,7 +471,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                 }
         }
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         if (needs_conn) {
@@ -523,7 +517,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                 }
                 if (likely(res != -ELINKCONG)) {
 exit:
-                        up(&tsock->sem);
+                        mutex_unlock(&tsock->lock);
                         return res;
                 }
                 if (m->msg_flags & MSG_DONTWAIT) {
@@ -562,7 +556,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
         if (unlikely(dest))
                 return send_msg(iocb, sock, m, total_len);
 
-        if (down_interruptible(&tsock->sem)) {
+        if (mutex_lock_interruptible(&tsock->lock)) {
                 return -ERESTARTSYS;
         }
@@ -578,7 +572,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
                 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
                 if (likely(res != -ELINKCONG)) {
 exit:
-                        up(&tsock->sem);
+                        mutex_unlock(&tsock->lock);
                         return res;
                 }
                 if (m->msg_flags & MSG_DONTWAIT) {
@@ -846,7 +840,7 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
 
         /* Look for a message in receive queue; wait if necessary */
 
-        if (unlikely(down_interruptible(&tsock->sem)))
+        if (unlikely(mutex_lock_interruptible(&tsock->lock)))
                 return -ERESTARTSYS;
 
 restart:
@@ -930,7 +924,7 @@ restart:
                         advance_queue(tsock);
         }
 exit:
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -981,7 +975,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 
         /* Look for a message in receive queue; wait if necessary */
 
-        if (unlikely(down_interruptible(&tsock->sem)))
+        if (unlikely(mutex_lock_interruptible(&tsock->lock)))
                 return -ERESTARTSYS;
 
 restart:
@@ -1077,7 +1071,7 @@ restart:
                 goto restart;
 
 exit:
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return sz_copied ? sz_copied : res;
 }
 
@@ -1293,7 +1287,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
                 return res;
         }
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         /* Wait for destination's 'ACK' response */
@@ -1317,7 +1311,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
                 sock->state = SS_DISCONNECTING;
         }
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -1365,7 +1359,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
             (flags & O_NONBLOCK)))
                 return -EWOULDBLOCK;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         if (wait_event_interruptible(*sock->sk->sk_sleep,
@@ -1375,7 +1369,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
         }
 
         buf = skb_peek(&sock->sk->sk_receive_queue);
-        res = tipc_create(sock->sk->sk_net, newsock, 0);
+        res = tipc_create(sock_net(sock->sk), newsock, 0);
         if (!res) {
                 struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
                 struct tipc_portid id;
@@ -1412,14 +1406,14 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
                 }
         }
 exit:
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
 /**
  * shutdown - shutdown socket connection
  * @sock: socket structure
- * @how: direction to close (unused; always treated as read + write)
+ * @how: direction to close (must be SHUT_RDWR)
  *
  * Terminates connection (if necessary), then purges socket's receive queue.
  *
@@ -1432,9 +1426,10 @@ static int shutdown(struct socket *sock, int how)
         struct sk_buff *buf;
         int res;
 
-        /* Could return -EINVAL for an invalid "how", but why bother? */
+        if (how != SHUT_RDWR)
+                return -EINVAL;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         sock_lock(tsock);
@@ -1484,7 +1479,7 @@ restart:
 
         sock_unlock(tsock);
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -1518,7 +1513,7 @@ static int setsockopt(struct socket *sock,
         if ((res = get_user(value, (u32 __user *)ov)))
                 return res;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         switch (opt) {
@@ -1541,7 +1536,7 @@ static int setsockopt(struct socket *sock,
                 res = -EINVAL;
         }
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
 
@@ -1574,7 +1569,7 @@ static int getsockopt(struct socket *sock,
         if ((res = get_user(len, ol)))
                 return res;
 
-        if (down_interruptible(&tsock->sem))
+        if (mutex_lock_interruptible(&tsock->lock))
                 return -ERESTARTSYS;
 
         switch (opt) {
@@ -1607,7 +1602,7 @@ static int getsockopt(struct socket *sock,
                         res = put_user(sizeof(value), ol);
         }
 
-        up(&tsock->sem);
+        mutex_unlock(&tsock->lock);
         return res;
 }
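For reference, the sketch below illustrates the locking conversion this patch applies throughout the file: the per-socket struct semaphore used as a sleeping lock becomes a struct mutex, with init_MUTEX()/down()/down_interruptible()/up() replaced by mutex_init()/mutex_lock()/mutex_lock_interruptible()/mutex_unlock(). This is a simplified, hypothetical example; the names example_sock, example_init, and example_op are invented for illustration, and only the API mapping is taken from the patch.

/* Hypothetical illustration of the semaphore-to-mutex conversion pattern;
 * not code from net/tipc/socket.c.
 */
#include <linux/errno.h>
#include <linux/mutex.h>

struct example_sock {
        struct mutex lock;                      /* was: struct semaphore sem; */
};

static void example_init(struct example_sock *esk)
{
        mutex_init(&esk->lock);                 /* was: init_MUTEX(&esk->sem); */
}

static int example_op(struct example_sock *esk)
{
        /* was: if (down_interruptible(&esk->sem)) return -ERESTARTSYS; */
        if (mutex_lock_interruptible(&esk->lock))
                return -ERESTARTSYS;

        /* ... socket operation serialized by the per-socket lock ... */

        mutex_unlock(&esk->lock);               /* was: up(&esk->sem); */
        return 0;
}

One behavioral note: unlike a semaphore, a mutex must be released by the task that acquired it, which matches how the patch uses the lock (every mutex_lock*() is paired with a mutex_unlock() on the same code path).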