author:    YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2008-03-25 20:26:21 +0300
committer: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2008-03-25 22:39:55 +0300
commit:    3b1e0a655f8eba44ab1ee2a1068d169ccfb853b9
tree:      09edb35f32ebcfb1b4dad904425128a110ef16ee /net/ipv4/udp.c
parent:    c346dca10840a874240c78efe3f39acf4312a1f2
download:  linux-3b1e0a655f8eba44ab1ee2a1068d169ccfb853b9.tar.xz
[NET] NETNS: Omit sock->sk_net without CONFIG_NET_NS.
Introduce per-sock inlines: sock_net(), sock_net_set() and
per-inet_timewait_sock inlines: twsk_net(), twsk_net_set().

Without CONFIG_NET_NS, no namespace other than &init_net exists.
Let's explicitly define them to help compiler optimizations.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
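For reference, a minimal sketch of what such accessors look like; this is an
illustration of the idea, not the exact code added by this patch series (the
real helpers are defined in include/net/sock.h):

#ifdef CONFIG_NET_NS
static inline struct net *sock_net(const struct sock *sk)
{
	return sk->sk_net;
}

static inline void sock_net_set(struct sock *sk, struct net *net)
{
	sk->sk_net = net;
}
#else
static inline struct net *sock_net(const struct sock *sk)
{
	return &init_net;	/* only one namespace exists */
}

static inline void sock_net_set(struct sock *sk, struct net *net)
{
	/* single namespace: nothing to store */
}
#endif

With CONFIG_NET_NS disabled, every "sock_net(sk) == net" test in the udp.c
hunks below reduces to a comparison against the constant &init_net, which the
compiler can fold; that is the optimization the changelog refers to.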
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e2cd93481359..76d52d37d6ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -137,7 +137,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
struct hlist_node *node;
sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
- if (sk->sk_net == net && sk->sk_hash == num)
+ if (sock_net(sk) == net && sk->sk_hash == num)
return 1;
return 0;
}
@@ -158,7 +158,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
struct hlist_head *head;
struct sock *sk2;
int error = 1;
- struct net *net = sk->sk_net;
+ struct net *net = sock_net(sk);
write_lock_bh(&udp_hash_lock);
@@ -218,7 +218,7 @@ gotit:
sk_for_each(sk2, node, head)
if (sk2->sk_hash == snum &&
sk2 != sk &&
- sk2->sk_net == net &&
+ sock_net(sk2) == net &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -269,7 +269,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
- if (sk->sk_net == net && sk->sk_hash == hnum &&
+ if (sock_net(sk) == net && sk->sk_hash == hnum &&
!ipv6_only_sock(sk)) {
int score = (sk->sk_family == PF_INET ? 1 : 0);
if (inet->rcv_saddr) {
@@ -607,7 +607,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
- err = ip_cmsg_send(sk->sk_net, msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
return err;
if (ipc.opt)
@@ -656,7 +656,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{ .sport = inet->sport,
.dport = dport } } };
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(sk->sk_net, &rt, &fl, sk, 1);
+ err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
if (err) {
if (err == -ENETUNREACH)
IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1511,7 +1511,7 @@ static struct sock *udp_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
struct hlist_node *node;
sk_for_each(sk, node, state->hashtable + state->bucket) {
- if (sk->sk_net != net)
+ if (sock_net(sk) != net)
continue;
if (sk->sk_family == state->family)
goto found;
@@ -1531,7 +1531,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
sk = sk_next(sk);
try_again:
;
- } while (sk && (sk->sk_net != net || sk->sk_family != state->family));
+ } while (sk && (sock_net(sk) != net || sk->sk_family != state->family));
if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
sk = sk_head(state->hashtable + state->bucket);