field | value | date |
---|---|---|
author | David S. Miller <davem@davemloft.net> | 2010-02-16 22:15:13 +0300 |
committer | David S. Miller <davem@davemloft.net> | 2010-02-16 22:15:13 +0300 |
commit | 749f621e20ab0db35a15ff730088922603c809ba (patch) | |
tree | 2684d12199b58f2b9e0c5b7e6cc0ea3f002e611a /net | |
parent | 339c6e99853d2ef1f02ad8a313e079050a300427 (diff) | |
parent | 3e5e524ffb5fcf2447eb5dd9f8e54ad22dd9baa7 (diff) | |
download | linux-749f621e20ab0db35a15ff730088922603c809ba.tar.xz | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
Diffstat (limited to 'net')
84 files changed, 2053 insertions, 1713 deletions
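
The merge carries a handful of recurring, mostly mechanical conversions that repeat throughout the hunks below: `XT_ALIGN()` is dropped from `.matchsize`/`.targetsize` (the x_tables core aligns extension sizes itself), `struct net` is threaded through the checkentry/destroy parameter blocks and cleanup iterators, read-only table data gains `const`, ebtables grows an extra `next_offset` sanity check, and the per-table boilerplate (static `initial_table` blobs plus arrays of `nf_hook_ops`) is replaced by `*_alloc_initial_table()` and `xt_hook_link()`/`xt_hook_unlink()`. As a reading aid, here is a condensed sketch of the shape an IPv4 table module takes after this merge, modelled on the iptable_filter.c hunks below; it is a paraphrase for orientation, not the verbatim file.

```c
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/ip.h>
#include <net/net_namespace.h>

#define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \
			    (1 << NF_INET_FORWARD) | \
			    (1 << NF_INET_LOCAL_OUT))

static const struct xt_table packet_filter = {
	.name		= "filter",
	.valid_hooks	= FILTER_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_FILTER,	/* hook priority now lives in the table */
};

static struct nf_hook_ops *filter_ops __read_mostly;

/* One hook function covers every hook point; the namespace comes from
 * whichever device is defined for the current hook. */
static unsigned int
iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
		    const struct net_device *in, const struct net_device *out,
		    int (*okfn)(struct sk_buff *))
{
	const struct net *net;

	if (hook == NF_INET_LOCAL_OUT &&
	    (skb->len < sizeof(struct iphdr) ||
	     ip_hdrlen(skb) < sizeof(struct iphdr)))
		/* root is playing with raw sockets. */
		return NF_ACCEPT;

	net = dev_net((in != NULL) ? in : out);
	return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
}

static int __net_init iptable_filter_net_init(struct net *net)
{
	struct ipt_replace *repl;

	/* The initial ruleset is built at runtime, replacing the old
	 * static initial_table blob. */
	repl = ipt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.iptable_filter = ipt_register_table(net, &packet_filter, repl);
	kfree(repl);
	if (IS_ERR(net->ipv4.iptable_filter))
		return PTR_ERR(net->ipv4.iptable_filter);
	return 0;
}

static void __net_exit iptable_filter_net_exit(struct net *net)
{
	/* Unregistration now takes the namespace so extension destructors see it. */
	ipt_unregister_table(net, net->ipv4.iptable_filter);
}

static struct pernet_operations iptable_filter_net_ops = {
	.init = iptable_filter_net_init,
	.exit = iptable_filter_net_exit,
};

static int __init iptable_filter_init(void)
{
	int ret = register_pernet_subsys(&iptable_filter_net_ops);

	if (ret < 0)
		return ret;
	/* xt_hook_link() builds and registers one nf_hook_ops for each bit
	 * set in packet_filter.valid_hooks. */
	filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
	if (IS_ERR(filter_ops)) {
		ret = PTR_ERR(filter_ops);
		unregister_pernet_subsys(&iptable_filter_net_ops);
	}
	return ret;
}

static void __exit iptable_filter_fini(void)
{
	xt_hook_unlink(&packet_filter, filter_ops);
	unregister_pernet_subsys(&iptable_filter_net_ops);
}

module_init(iptable_filter_init);
module_exit(iptable_filter_fini);
MODULE_LICENSE("GPL");
```

The real iptable_filter.c additionally patches the FORWARD policy from its `forward` module parameter right after allocating the template (`((struct ipt_standard *)repl->entries)[1].target.verdict = -forward - 1;`), and arptable_filter.c follows the same pattern with `arpt_alloc_initial_table()` and the NF_ARP hooks.
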
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c index bd91dc58d49b..5d1176758ca5 100644 --- a/net/bridge/netfilter/ebt_802_3.c +++ b/net/bridge/netfilter/ebt_802_3.c @@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_802_3_mt, .checkentry = ebt_802_3_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), + .matchsize = sizeof(struct ebt_802_3_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index b7ad60419f9a..e727697c5847 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c @@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_arp_mt, .checkentry = ebt_arp_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), + .matchsize = sizeof(struct ebt_arp_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c index 76584cd72e57..f392e9d93f53 100644 --- a/net/bridge/netfilter/ebt_arpreply.c +++ b/net/bridge/netfilter/ebt_arpreply.c @@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = { .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), .target = ebt_arpreply_tg, .checkentry = ebt_arpreply_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), + .targetsize = sizeof(struct ebt_arpreply_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c index 6b49ea9e31fb..2bb40d728a35 100644 --- a/net/bridge/netfilter/ebt_dnat.c +++ b/net/bridge/netfilter/ebt_dnat.c @@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = { (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), .target = ebt_dnat_tg, .checkentry = ebt_dnat_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), + .targetsize = sizeof(struct ebt_nat_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c index d771bbfbcbe6..5de6df6f86b8 100644 --- a/net/bridge/netfilter/ebt_ip.c +++ b/net/bridge/netfilter/ebt_ip.c @@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_ip_mt, .checkentry = ebt_ip_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), + .matchsize = sizeof(struct ebt_ip_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c index 784a6573876c..bbf2534ef026 100644 --- a/net/bridge/netfilter/ebt_ip6.c +++ b/net/bridge/netfilter/ebt_ip6.c @@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_ip6_mt, .checkentry = ebt_ip6_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), + .matchsize = sizeof(struct ebt_ip6_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index f7bd9192ff0c..9dd16e6b10e7 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c @@ -90,7 +90,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_limit_mt, .checkentry = ebt_limit_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), + .matchsize = sizeof(struct ebt_limit_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index e4ea3fdd1d41..e873924ddb5d 100644 --- a/net/bridge/netfilter/ebt_log.c +++ 
b/net/bridge/netfilter/ebt_log.c @@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_log_tg, .checkentry = ebt_log_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), + .targetsize = sizeof(struct ebt_log_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c index 2fee7e8e2e93..153e167374a2 100644 --- a/net/bridge/netfilter/ebt_mark.c +++ b/net/bridge/netfilter/ebt_mark.c @@ -59,7 +59,7 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_mark_tg, .checkentry = ebt_mark_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), + .targetsize = sizeof(struct ebt_mark_t_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c index ea570f214b1d..89abf4030399 100644 --- a/net/bridge/netfilter/ebt_mark_m.c +++ b/net/bridge/netfilter/ebt_mark_m.c @@ -41,7 +41,7 @@ static struct xt_match ebt_mark_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_mark_mt, .checkentry = ebt_mark_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), + .matchsize = sizeof(struct ebt_mark_m_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c index 2a63d996dd4e..40dbd248b9ae 100644 --- a/net/bridge/netfilter/ebt_nflog.c +++ b/net/bridge/netfilter/ebt_nflog.c @@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_nflog_tg, .checkentry = ebt_nflog_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), + .targetsize = sizeof(struct ebt_nflog_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c index 883e96e2a542..e2a07e6cbef3 100644 --- a/net/bridge/netfilter/ebt_pkttype.c +++ b/net/bridge/netfilter/ebt_pkttype.c @@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_pkttype_mt, .checkentry = ebt_pkttype_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), + .matchsize = sizeof(struct ebt_pkttype_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index c8a49f7a57ba..9be8fbcd370b 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c @@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = { (1 << NF_BR_BROUTING), .target = ebt_redirect_tg, .checkentry = ebt_redirect_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), + .targetsize = sizeof(struct ebt_redirect_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index 8d04d4c302bd..9c7b520765a2 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c @@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = { .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), .target = ebt_snat_tg, .checkentry = ebt_snat_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), + .targetsize = sizeof(struct ebt_nat_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 75e29a9cebda..92a93d363765 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c @@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = { 
.family = NFPROTO_BRIDGE, .match = ebt_stp_mt, .checkentry = ebt_stp_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), + .matchsize = sizeof(struct ebt_stp_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index ce50688a6431..c6ac657074a6 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = { .family = NFPROTO_BRIDGE, .target = ebt_ulog_tg, .checkentry = ebt_ulog_tg_check, - .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), + .targetsize = sizeof(struct ebt_ulog_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c index 3dddd489328e..be1dd2e1f615 100644 --- a/net/bridge/netfilter/ebt_vlan.c +++ b/net/bridge/netfilter/ebt_vlan.c @@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = { .family = NFPROTO_BRIDGE, .match = ebt_vlan_mt, .checkentry = ebt_vlan_mt_check, - .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), + .matchsize = sizeof(struct ebt_vlan_info), .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index d32ab13e728c..ae3f106c3908 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net) static void __net_exit broute_net_exit(struct net *net) { - ebt_unregister_table(net->xt.broute_table); + ebt_unregister_table(net, net->xt.broute_table); } static struct pernet_operations broute_net_ops = { diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 60b1a6ca7185..42e6bd094574 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net) static void __net_exit frame_filter_net_exit(struct net *net) { - ebt_unregister_table(net->xt.frame_filter); + ebt_unregister_table(net, net->xt.frame_filter); } static struct pernet_operations frame_filter_net_ops = { diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 4a98804203b0..6dc2f878ae05 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net) static void __net_exit frame_nat_net_exit(struct net *net) { - ebt_unregister_table(net->xt.frame_nat); + ebt_unregister_table(net, net->xt.frame_nat); } static struct pernet_operations frame_nat_net_ops = { diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 0b7f262cd148..4370e9680487 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -82,7 +82,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m, return m->u.match->match(skb, par) ? 
EBT_MATCH : EBT_NOMATCH; } -static inline int ebt_dev_check(char *entry, const struct net_device *device) +static inline int +ebt_dev_check(const char *entry, const struct net_device *device) { int i = 0; const char *devname; @@ -100,8 +101,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device) #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) /* process standard matches */ -static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, - const struct net_device *in, const struct net_device *out) +static inline int +ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h, + const struct net_device *in, const struct net_device *out) { int verdict, i; @@ -156,12 +158,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb, int i, nentries; struct ebt_entry *point; struct ebt_counter *counter_base, *cb_base; - struct ebt_entry_target *t; + const struct ebt_entry_target *t; int verdict, sp = 0; struct ebt_chainstack *cs; struct ebt_entries *chaininfo; - char *base; - struct ebt_table_info *private; + const char *base; + const struct ebt_table_info *private; bool hotdrop = false; struct xt_match_param mtpar; struct xt_target_param tgpar; @@ -395,7 +397,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, return 0; } -static int ebt_verify_pointers(struct ebt_replace *repl, +static int ebt_verify_pointers(const struct ebt_replace *repl, struct ebt_table_info *newinfo) { unsigned int limit = repl->entries_size; @@ -442,6 +444,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl, break; if (left < e->next_offset) break; + if (e->next_offset < sizeof(struct ebt_entry)) + return -EINVAL; offset += e->next_offset; } } @@ -466,8 +470,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl, * to parse the userspace data */ static inline int -ebt_check_entry_size_and_hooks(struct ebt_entry *e, - struct ebt_table_info *newinfo, +ebt_check_entry_size_and_hooks(const struct ebt_entry *e, + const struct ebt_table_info *newinfo, unsigned int *n, unsigned int *cnt, unsigned int *totalcnt, unsigned int *udc_cnt) { @@ -561,13 +565,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, } static inline int -ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) +ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.match = m->u.match; par.matchinfo = m->data; par.family = NFPROTO_BRIDGE; @@ -578,13 +583,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) } static inline int -ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) +ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.target = w->u.watcher; par.targinfo = w->data; par.family = NFPROTO_BRIDGE; @@ -595,7 +601,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) } static inline int -ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) +ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) { struct xt_tgdtor_param par; struct ebt_entry_target *t; @@ -605,10 +611,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) /* we're done */ if (cnt && (*cnt)-- == 0) return 1; - EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); - EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); + EBT_WATCHER_ITERATE(e, 
ebt_cleanup_watcher, net, NULL); + EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); + par.net = net; par.target = t->u.target; par.targinfo = t->data; par.family = NFPROTO_BRIDGE; @@ -619,7 +626,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) } static inline int -ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, +ebt_check_entry(struct ebt_entry *e, struct net *net, + const struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, struct ebt_cl_stack *cl_s, unsigned int udc_cnt) { @@ -671,6 +679,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, } i = 0; + mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; mtpar.hook_mask = tgpar.hook_mask = hookmask; @@ -726,9 +735,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, (*cnt)++; return 0; cleanup_watchers: - EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); + EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); cleanup_matches: - EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); + EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); return ret; } @@ -737,12 +746,12 @@ cleanup_matches: * the hook mask for udc tells us from which base chains the udc can be * accessed. This mask is a parameter to the check() functions of the extensions */ -static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, +static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, unsigned int udc_cnt, unsigned int hooknr, char *base) { int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; - struct ebt_entry *e = (struct ebt_entry *)chain->data; - struct ebt_entry_target *t; + const struct ebt_entry *e = (struct ebt_entry *)chain->data; + const struct ebt_entry_target *t; while (pos < nentries || chain_nr != -1) { /* end of udc, go back one 'recursion' step */ @@ -808,7 +817,8 @@ letscontinue: } /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ -static int translate_table(char *name, struct ebt_table_info *newinfo) +static int translate_table(struct net *net, const char *name, + struct ebt_table_info *newinfo) { unsigned int i, j, k, udc_cnt; int ret; @@ -917,17 +927,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo) /* used to know what we need to clean up if something goes wrong */ i = 0; ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); + ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); if (ret != 0) { EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_cleanup_entry, &i); + ebt_cleanup_entry, net, &i); } vfree(cl_s); return ret; } /* called under write_lock */ -static void get_counters(struct ebt_counter *oldcounters, +static void get_counters(const struct ebt_counter *oldcounters, struct ebt_counter *counters, unsigned int nentries) { int i, cpu; @@ -950,7 +960,8 @@ static void get_counters(struct ebt_counter *oldcounters, } /* replace the table */ -static int do_replace(struct net *net, void __user *user, unsigned int len) +static int do_replace(struct net *net, const void __user *user, + unsigned int len) { int ret, i, countersize; struct ebt_table_info *newinfo; @@ -1017,7 +1028,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) if (ret != 0) goto free_counterstmp; - ret = translate_table(tmp.name, newinfo); + ret = 
translate_table(net, tmp.name, newinfo); if (ret != 0) goto free_counterstmp; @@ -1070,7 +1081,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len) /* decrease module count and free resources */ EBT_ENTRY_ITERATE(table->entries, table->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); vfree(table->entries); if (table->chainstack) { @@ -1087,7 +1098,7 @@ free_unlock: mutex_unlock(&ebt_mutex); free_iterate: EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ @@ -1154,7 +1165,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) newinfo->hook_entry[i] = p + ((char *)repl->hook_entry[i] - repl->entries); } - ret = translate_table(repl->name, newinfo); + ret = translate_table(net, repl->name, newinfo); if (ret != 0) { BUGPRINT("Translate_table failed\n"); goto free_chainstack; @@ -1204,7 +1215,7 @@ out: return ERR_PTR(ret); } -void ebt_unregister_table(struct ebt_table *table) +void ebt_unregister_table(struct net *net, struct ebt_table *table) { int i; @@ -1216,7 +1227,7 @@ void ebt_unregister_table(struct ebt_table *table) list_del(&table->list); mutex_unlock(&ebt_mutex); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, - ebt_cleanup_entry, NULL); + ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); @@ -1230,7 +1241,8 @@ void ebt_unregister_table(struct ebt_table *table) } /* userspace just supplied us with counters */ -static int update_counters(struct net *net, void __user *user, unsigned int len) +static int update_counters(struct net *net, const void __user *user, + unsigned int len) { int i, ret; struct ebt_counter *tmp; @@ -1285,8 +1297,8 @@ free_tmp: return ret; } -static inline int ebt_make_matchname(struct ebt_entry_match *m, - char *base, char __user *ubase) +static inline int ebt_make_matchname(const struct ebt_entry_match *m, + const char *base, char __user *ubase) { char __user *hlp = ubase + ((char *)m - base); if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) @@ -1294,8 +1306,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m, return 0; } -static inline int ebt_make_watchername(struct ebt_entry_watcher *w, - char *base, char __user *ubase) +static inline int ebt_make_watchername(const struct ebt_entry_watcher *w, + const char *base, char __user *ubase) { char __user *hlp = ubase + ((char *)w - base); if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) @@ -1303,11 +1315,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w, return 0; } -static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) +static inline int +ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase) { int ret; char __user *hlp; - struct ebt_entry_target *t; + const struct ebt_entry_target *t; if (e->bitmask == 0) return 0; @@ -1328,10 +1341,11 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u /* called with ebt_mutex locked */ static int copy_everything_to_user(struct ebt_table *t, void __user *user, - int *len, int cmd) + const int *len, int cmd) { struct ebt_replace tmp; - struct ebt_counter *counterstmp, *oldcounters; + struct ebt_counter *counterstmp; + const struct ebt_counter *oldcounters; unsigned int entries_size, nentries; char *entries; diff 
--git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 90203e1b9187..4db5c1ece0f9 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -27,6 +27,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp/arp_tables.h> +#include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); @@ -58,6 +59,12 @@ do { \ #define ARP_NF_ASSERT(x) #endif +void *arpt_alloc_initial_table(const struct xt_table *info) +{ + return xt_alloc_initial_table(arpt, ARPT); +} +EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); + static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, const char *hdr_addr, int len) { @@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par) return NF_DROP; } -static inline struct arpt_entry *get_entry(void *base, unsigned int offset) +static inline const struct arpt_entry_target * +arpt_get_target_c(const struct arpt_entry *e) +{ + return arpt_get_target((struct arpt_entry *)e); +} + +static inline struct arpt_entry * +get_entry(const void *base, unsigned int offset) { return (struct arpt_entry *)(base + offset); } @@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, arp = arp_hdr(skb); do { - struct arpt_entry_target *t; + const struct arpt_entry_target *t; int hdr_len; if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { @@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, (2 * skb->dev->addr_len); ADD_COUNTER(e->counters, hdr_len, 1); - t = arpt_get_target(e); + t = arpt_get_target_c(e); /* Standard target? */ if (!t->u.kernel.target->target) { @@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp) /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. 
*/ -static int mark_source_chains(struct xt_table_info *newinfo, +static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; @@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo, for (;;) { const struct arpt_standard_target *t - = (void *)arpt_get_target(e); + = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { @@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo, return 1; } -static inline int check_entry(struct arpt_entry *e, const char *name) +static inline int check_entry(const struct arpt_entry *e, const char *name) { const struct arpt_entry_target *t; @@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name) if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) return -EINVAL; - t = arpt_get_target(e); + t = arpt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; @@ -533,14 +547,14 @@ out: return ret; } -static bool check_underflow(struct arpt_entry *e) +static bool check_underflow(const struct arpt_entry *e) { const struct arpt_entry_target *t; unsigned int verdict; if (!unconditional(&e->arp)) return false; - t = arpt_get_target(e); + t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct arpt_standard_target *)t)->verdict; @@ -550,8 +564,8 @@ static bool check_underflow(struct arpt_entry *e) static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, - unsigned char *base, - unsigned char *limit, + const unsigned char *base, + const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks, @@ -761,11 +775,11 @@ static void get_counters(const struct xt_table_info *t, local_bh_enable(); } -static struct xt_counters *alloc_counters(struct xt_table *table) +static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - struct xt_table_info *private = table->private; + const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care @@ -783,11 +797,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) } static int copy_entries_to_user(unsigned int total_size, - struct xt_table *table, + const struct xt_table *table, void __user *userptr) { unsigned int off, num; - struct arpt_entry *e; + const struct arpt_entry *e; struct xt_counters *counters; struct xt_table_info *private = table->private; int ret = 0; @@ -807,7 +821,7 @@ static int copy_entries_to_user(unsigned int total_size, /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ - struct arpt_entry_target *t; + const struct arpt_entry_target *t; e = (struct arpt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off @@ -818,7 +832,7 @@ static int copy_entries_to_user(unsigned int total_size, goto free_counters; } - t = arpt_get_target(e); + t = arpt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct arpt_entry_target, u.user.name), @@ -835,7 +849,7 @@ static int copy_entries_to_user(unsigned int total_size, } #ifdef CONFIG_COMPAT -static void compat_standard_from_user(void *dst, void *src) +static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; @@ -844,7 +858,7 @@ static void compat_standard_from_user(void *dst, void *src) memcpy(dst, &v, sizeof(v)); } -static int compat_standard_to_user(void __user *dst, void *src) +static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; @@ -853,18 +867,18 @@ static int compat_standard_to_user(void __user *dst, void *src) return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } -static int compat_calc_entry(struct arpt_entry *e, +static int compat_calc_entry(const struct arpt_entry *e, const struct xt_table_info *info, - void *base, struct xt_table_info *newinfo) + const void *base, struct xt_table_info *newinfo) { - struct arpt_entry_target *t; + const struct arpt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - base; - t = arpt_get_target(e); + t = arpt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); @@ -900,7 +914,8 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, int *len, int compat) +static int get_info(struct net *net, void __user *user, + const int *len, int compat) { char name[ARPT_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -959,7 +974,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) } static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, - int *len) + const int *len) { int ret; struct arpt_get_entries get; @@ -1073,7 +1088,8 @@ static int __do_replace(struct net *net, const char *name, return ret; } -static int do_replace(struct net *net, void __user *user, unsigned int len) +static int do_replace(struct net *net, const void __user *user, + unsigned int len) { int ret; struct arpt_replace tmp; @@ -1133,8 +1149,8 @@ add_counter_to_entry(struct arpt_entry *e, return 0; } -static int do_add_counters(struct net *net, void __user *user, unsigned int len, - int compat) +static int do_add_counters(struct net *net, const void __user *user, + unsigned int len, int compat) { unsigned int i, curcpu; struct xt_counters_info tmp; @@ -1238,10 +1254,10 @@ static inline int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, - unsigned char *base, - unsigned char *limit, - unsigned int *hook_entries, - unsigned int *underflows, + const unsigned char *base, + const unsigned char *limit, + const unsigned int *hook_entries, + const unsigned int *underflows, unsigned int *i, const char *name) { diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 
97337601827a..bfe26f32b930 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c @@ -6,6 +6,7 @@ */ #include <linux/module.h> +#include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp/arp_tables.h> MODULE_LICENSE("GPL"); @@ -15,93 +16,37 @@ MODULE_DESCRIPTION("arptables filter table"); #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ (1 << NF_ARP_FORWARD)) -static const struct -{ - struct arpt_replace repl; - struct arpt_standard entries[3]; - struct arpt_error term; -} initial_table __net_initdata = { - .repl = { - .name = "filter", - .valid_hooks = FILTER_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error), - .hook_entry = { - [NF_ARP_IN] = 0, - [NF_ARP_OUT] = sizeof(struct arpt_standard), - [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), - }, - .underflow = { - [NF_ARP_IN] = 0, - [NF_ARP_OUT] = sizeof(struct arpt_standard), - [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), - }, - }, - .entries = { - ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */ - ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */ - ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */ - }, - .term = ARPT_ERROR_INIT, -}; - static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_ARP, + .priority = NF_IP_PRI_FILTER, }; /* The work comes in here from netfilter.c */ -static unsigned int arpt_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int +arptable_filter_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return arpt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.arptable_filter); -} + const struct net *net = dev_net((in != NULL) ? 
in : out); -static unsigned int arpt_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return arpt_do_table(skb, hook, in, out, - dev_net(out)->ipv4.arptable_filter); + return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter); } -static struct nf_hook_ops arpt_ops[] __read_mostly = { - { - .hook = arpt_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_ARP, - .hooknum = NF_ARP_IN, - .priority = NF_IP_PRI_FILTER, - }, - { - .hook = arpt_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_ARP, - .hooknum = NF_ARP_OUT, - .priority = NF_IP_PRI_FILTER, - }, - { - .hook = arpt_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_ARP, - .hooknum = NF_ARP_FORWARD, - .priority = NF_IP_PRI_FILTER, - }, -}; +static struct nf_hook_ops *arpfilter_ops __read_mostly; static int __net_init arptable_filter_net_init(struct net *net) { - /* Register table */ + struct arpt_replace *repl; + + repl = arpt_alloc_initial_table(&packet_filter); + if (repl == NULL) + return -ENOMEM; net->ipv4.arptable_filter = - arpt_register_table(net, &packet_filter, &initial_table.repl); + arpt_register_table(net, &packet_filter, repl); + kfree(repl); if (IS_ERR(net->ipv4.arptable_filter)) return PTR_ERR(net->ipv4.arptable_filter); return 0; @@ -125,9 +70,11 @@ static int __init arptable_filter_init(void) if (ret < 0) return ret; - ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); - if (ret < 0) + arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook); + if (IS_ERR(arpfilter_ops)) { + ret = PTR_ERR(arpfilter_ops); goto cleanup_table; + } return ret; cleanup_table: @@ -137,7 +84,7 @@ cleanup_table: static void __exit arptable_filter_fini(void) { - nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); + xt_hook_unlink(&packet_filter, arpfilter_ops); unregister_pernet_subsys(&arptable_filter_net_ops); } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 3ce53cf13d5a..e94c18bdfc68 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -28,6 +28,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/nf_log.h> +#include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); @@ -66,6 +67,12 @@ do { \ #define inline #endif +void *ipt_alloc_initial_table(const struct xt_table *info) +{ + return xt_alloc_initial_table(ipt, IPT); +} +EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); + /* We keep a set of rules for each CPU, so we can avoid write-locking them in the softirq when updating the counters and therefore @@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par) /* Performance critical - called for every packet */ static inline bool -do_match(struct ipt_entry_match *m, const struct sk_buff *skb, +do_match(const struct ipt_entry_match *m, const struct sk_buff *skb, struct xt_match_param *par) { par->match = m->u.kernel.match; @@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb, /* Performance critical */ static inline struct ipt_entry * -get_entry(void *base, unsigned int offset) +get_entry(const void *base, unsigned int offset) { return (struct ipt_entry *)(base + offset); } @@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip) #undef FWINV } +/* for const-correctness */ +static inline const struct ipt_entry_target * +ipt_get_target_c(const struct ipt_entry 
*e) +{ + return ipt_get_target((struct ipt_entry *)e); +} + #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) static const char *const hooknames[] = { @@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = { /* Mildly perf critical (only if packet tracing is on) */ static inline int -get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, +get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { - struct ipt_standard_target *t = (void *)ipt_get_target(s); + const struct ipt_standard_target *t = (void *)ipt_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ @@ -263,15 +277,15 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, return 0; } -static void trace_packet(struct sk_buff *skb, +static void trace_packet(const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, - struct xt_table_info *private, - struct ipt_entry *e) + const struct xt_table_info *private, + const struct ipt_entry *e) { - void *table_base; + const void *table_base; const struct ipt_entry *root; const char *hookname, *chainname, *comment; unsigned int rulenum = 0; @@ -315,9 +329,9 @@ ipt_do_table(struct sk_buff *skb, /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; - void *table_base; + const void *table_base; struct ipt_entry *e, *back; - struct xt_table_info *private; + const struct xt_table_info *private; struct xt_match_param mtpar; struct xt_target_param tgpar; @@ -350,7 +364,7 @@ ipt_do_table(struct sk_buff *skb, back = get_entry(table_base, private->underflow[hook]); do { - struct ipt_entry_target *t; + const struct ipt_entry_target *t; IP_NF_ASSERT(e); IP_NF_ASSERT(back); @@ -443,7 +457,7 @@ ipt_do_table(struct sk_buff *skb, /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. 
*/ static int -mark_source_chains(struct xt_table_info *newinfo, +mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; @@ -461,8 +475,8 @@ mark_source_chains(struct xt_table_info *newinfo, e->counters.pcnt = pos; for (;;) { - struct ipt_standard_target *t - = (void *)ipt_get_target(e); + const struct ipt_standard_target *t + = (void *)ipt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { @@ -553,13 +567,14 @@ mark_source_chains(struct xt_table_info *newinfo, } static int -cleanup_match(struct ipt_entry_match *m, unsigned int *i) +cleanup_match(struct ipt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV4; @@ -570,9 +585,9 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i) } static int -check_entry(struct ipt_entry *e, const char *name) +check_entry(const struct ipt_entry *e, const char *name) { - struct ipt_entry_target *t; + const struct ipt_entry_target *t; if (!ip_checkentry(&e->ip)) { duprintf("ip_tables: ip check failed %p %s.\n", e, name); @@ -583,7 +598,7 @@ check_entry(struct ipt_entry *e, const char *name) e->next_offset) return -EINVAL; - t = ipt_get_target(e); + t = ipt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; @@ -637,10 +652,11 @@ err: return ret; } -static int check_target(struct ipt_entry *e, const char *name) +static int check_target(struct ipt_entry *e, struct net *net, const char *name) { struct ipt_entry_target *t = ipt_get_target(e); struct xt_tgchk_param par = { + .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, @@ -661,8 +677,8 @@ static int check_target(struct ipt_entry *e, const char *name) } static int -find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, - unsigned int *i) +find_check_entry(struct ipt_entry *e, struct net *net, const char *name, + unsigned int size, unsigned int *i) { struct ipt_entry_target *t; struct xt_target *target; @@ -675,6 +691,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, return ret; j = 0; + mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; @@ -695,7 +712,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, } t->u.kernel.target = target; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto err; @@ -704,18 +721,18 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, err: module_put(t->u.kernel.target->me); cleanup_matches: - IPT_MATCH_ITERATE(e, cleanup_match, &j); + IPT_MATCH_ITERATE(e, cleanup_match, net, &j); return ret; } -static bool check_underflow(struct ipt_entry *e) +static bool check_underflow(const struct ipt_entry *e) { const struct ipt_entry_target *t; unsigned int verdict; if (!unconditional(&e->ip)) return false; - t = ipt_get_target(e); + t = ipt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct ipt_standard_target *)t)->verdict; @@ -726,8 +743,8 @@ static bool check_underflow(struct ipt_entry *e) static int check_entry_size_and_hooks(struct ipt_entry *e, struct xt_table_info *newinfo, - unsigned char *base, - unsigned char *limit, + const unsigned char *base, + const unsigned char *limit, const unsigned int *hook_entries, const unsigned int 
*underflows, unsigned int valid_hooks, @@ -774,7 +791,7 @@ check_entry_size_and_hooks(struct ipt_entry *e, } static int -cleanup_entry(struct ipt_entry *e, unsigned int *i) +cleanup_entry(struct ipt_entry *e, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; struct ipt_entry_target *t; @@ -783,9 +800,10 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i) return 1; /* Cleanup all matches */ - IPT_MATCH_ITERATE(e, cleanup_match, NULL); + IPT_MATCH_ITERATE(e, cleanup_match, net, NULL); t = ipt_get_target(e); + par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; @@ -798,7 +816,8 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i) /* Checks and translates the user-supplied table segment (held in newinfo) */ static int -translate_table(const char *name, +translate_table(struct net *net, + const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, void *entry0, @@ -860,11 +879,11 @@ translate_table(const char *name, /* Finally, each sanity check must pass */ i = 0; ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, - find_check_entry, name, size, &i); + find_check_entry, net, name, size, &i); if (ret != 0) { IPT_ENTRY_ITERATE(entry0, newinfo->size, - cleanup_entry, &i); + cleanup_entry, net, &i); return ret; } @@ -940,11 +959,11 @@ get_counters(const struct xt_table_info *t, local_bh_enable(); } -static struct xt_counters * alloc_counters(struct xt_table *table) +static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - struct xt_table_info *private = table->private; + const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -962,11 +981,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table) static int copy_entries_to_user(unsigned int total_size, - struct xt_table *table, + const struct xt_table *table, void __user *userptr) { unsigned int off, num; - struct ipt_entry *e; + const struct ipt_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; @@ -1018,7 +1037,7 @@ copy_entries_to_user(unsigned int total_size, } } - t = ipt_get_target(e); + t = ipt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct ipt_entry_target, u.user.name), @@ -1035,7 +1054,7 @@ copy_entries_to_user(unsigned int total_size, } #ifdef CONFIG_COMPAT -static void compat_standard_from_user(void *dst, void *src) +static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; @@ -1044,7 +1063,7 @@ static void compat_standard_from_user(void *dst, void *src) memcpy(dst, &v, sizeof(v)); } -static int compat_standard_to_user(void __user *dst, void *src) +static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; @@ -1054,24 +1073,24 @@ static int compat_standard_to_user(void __user *dst, void *src) } static inline int -compat_calc_match(struct ipt_entry_match *m, int *size) +compat_calc_match(const struct ipt_entry_match *m, int *size) { *size += xt_compat_match_offset(m->u.kernel.match); return 0; } -static int compat_calc_entry(struct ipt_entry *e, +static int compat_calc_entry(const struct ipt_entry *e, const struct xt_table_info *info, - void *base, struct xt_table_info *newinfo) + const void *base, struct xt_table_info *newinfo) { - struct ipt_entry_target *t; + const struct 
ipt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - base; IPT_MATCH_ITERATE(e, compat_calc_match, &off); - t = ipt_get_target(e); + t = ipt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); @@ -1107,7 +1126,8 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, int *len, int compat) +static int get_info(struct net *net, void __user *user, + const int *len, int compat) { char name[IPT_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -1167,7 +1187,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) } static int -get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len) +get_entries(struct net *net, struct ipt_get_entries __user *uptr, + const int *len) { int ret; struct ipt_get_entries get; @@ -1258,7 +1279,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, - NULL); + net, NULL); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) @@ -1277,7 +1298,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, void __user *user, unsigned int len) +do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct ipt_replace tmp; @@ -1303,7 +1324,7 @@ do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_table(tmp.name, tmp.valid_hooks, + ret = translate_table(net, tmp.name, tmp.valid_hooks, newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) @@ -1318,7 +1339,7 @@ do_replace(struct net *net, void __user *user, unsigned int len) return 0; free_newinfo_untrans: - IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); + IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -1338,7 +1359,8 @@ add_counter_to_entry(struct ipt_entry *e, } static int -do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) +do_add_counters(struct net *net, const void __user *user, + unsigned int len, int compat) { unsigned int i, curcpu; struct xt_counters_info tmp; @@ -1534,10 +1556,10 @@ static int check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, - unsigned char *base, - unsigned char *limit, - unsigned int *hook_entries, - unsigned int *underflows, + const unsigned char *base, + const unsigned char *limit, + const unsigned int *hook_entries, + const unsigned int *underflows, unsigned int *i, const char *name) { @@ -1655,7 +1677,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, } static int -compat_check_entry(struct ipt_entry *e, const char *name, +compat_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int *i) { struct xt_mtchk_param mtpar; @@ -1663,6 +1685,7 @@ compat_check_entry(struct ipt_entry *e, const char *name, int ret; j = 0; + mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = 
e->comefrom; @@ -1671,7 +1694,7 @@ compat_check_entry(struct ipt_entry *e, const char *name, if (ret) goto cleanup_matches; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto cleanup_matches; @@ -1679,12 +1702,13 @@ compat_check_entry(struct ipt_entry *e, const char *name, return 0; cleanup_matches: - IPT_MATCH_ITERATE(e, cleanup_match, &j); + IPT_MATCH_ITERATE(e, cleanup_match, net, &j); return ret; } static int -translate_compat_table(const char *name, +translate_compat_table(struct net *net, + const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, @@ -1773,12 +1797,12 @@ translate_compat_table(const char *name, i = 0; ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, - name, &i); + net, name, &i); if (ret) { j -= i; COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, compat_release_entry, &j); - IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); + IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i); xt_free_table_info(newinfo); return ret; } @@ -1833,7 +1857,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(tmp.name, tmp.valid_hooks, + ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); @@ -1849,7 +1873,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return 0; free_newinfo_untrans: - IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); + IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -2086,7 +2110,7 @@ struct xt_table *ipt_register_table(struct net *net, loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; memcpy(loc_cpu_entry, repl->entries, repl->size); - ret = translate_table(table->name, table->valid_hooks, + ret = translate_table(net, table->name, table->valid_hooks, newinfo, loc_cpu_entry, repl->size, repl->num_entries, repl->hook_entry, @@ -2108,7 +2132,7 @@ out: return ERR_PTR(ret); } -void ipt_unregister_table(struct xt_table *table) +void ipt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; @@ -2118,7 +2142,7 @@ void ipt_unregister_table(struct xt_table *table) /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; - IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); + IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 40ca2d240abb..0886f96c736b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -560,8 +560,7 @@ struct clusterip_seq_position { static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) { - const struct proc_dir_entry *pde = s->private; - struct clusterip_config *c = pde->data; + struct clusterip_config *c = s->private; unsigned int weight; u_int32_t local_nodes; struct clusterip_seq_position *idx; @@ -632,10 +631,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file) if (!ret) { struct seq_file *sf = file->private_data; - struct proc_dir_entry *pde = PDE(inode); - struct clusterip_config *c = pde->data; + struct clusterip_config 
*c = PDE(inode)->data; - sf->private = pde; + sf->private = c; clusterip_config_get(c); } @@ -645,8 +643,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file) static int clusterip_proc_release(struct inode *inode, struct file *file) { - struct proc_dir_entry *pde = PDE(inode); - struct clusterip_config *c = pde->data; + struct clusterip_config *c = PDE(inode)->data; int ret; ret = seq_release(inode, file); @@ -660,10 +657,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file) static ssize_t clusterip_proc_write(struct file *file, const char __user *input, size_t size, loff_t *ofs) { + struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data; #define PROC_WRITELEN 10 char buffer[PROC_WRITELEN+1]; - const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); - struct clusterip_config *c = pde->data; unsigned long nodenum; if (copy_from_user(buffer, input, PROC_WRITELEN)) diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 399061c3fd7d..09a5d3f7cc41 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -338,7 +338,7 @@ struct compat_ipt_ulog_info { char prefix[ULOG_PREFIX_LEN]; }; -static void ulog_tg_compat_from_user(void *dst, void *src) +static void ulog_tg_compat_from_user(void *dst, const void *src) { const struct compat_ipt_ulog_info *cl = src; struct ipt_ulog_info l = { @@ -351,7 +351,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src) memcpy(dst, &l, sizeof(l)); } -static int ulog_tg_compat_to_user(void __user *dst, void *src) +static int ulog_tg_compat_to_user(void __user *dst, const void *src) { const struct ipt_ulog_info *l = src; struct compat_ipt_ulog_info cl = { diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index df566cbd68e5..c8dc9800d620 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -23,104 +23,32 @@ MODULE_DESCRIPTION("iptables filter table"); (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT)) -static struct -{ - struct ipt_replace repl; - struct ipt_standard entries[3]; - struct ipt_error term; -} initial_table __net_initdata = { - .repl = { - .name = "filter", - .valid_hooks = FILTER_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), - .hook_entry = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, - }, - .underflow = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, - }, - }, - .entries = { - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IPT_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, + .priority = NF_IP_PRI_FILTER, }; -/* The work comes in here from netfilter.c. 
*/ -static unsigned int -ipt_local_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_filter); -} - static unsigned int -ipt_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +iptable_filter_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_filter); -} + const struct net *net; -static unsigned int -ipt_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) + if (hook == NF_INET_LOCAL_OUT && + (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr))) + /* root is playing with raw sockets. */ return NF_ACCEPT; - return ipt_do_table(skb, hook, in, out, - dev_net(out)->ipv4.iptable_filter); + + net = dev_net((in != NULL) ? in : out); + return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter); } -static struct nf_hook_ops ipt_ops[] __read_mostly = { - { - .hook = ipt_local_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_FILTER, - }, - { - .hook = ipt_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP_PRI_FILTER, - }, - { - .hook = ipt_local_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP_PRI_FILTER, - }, -}; +static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. 
*/ static int forward = NF_ACCEPT; @@ -128,9 +56,18 @@ module_param(forward, bool, 0000); static int __net_init iptable_filter_net_init(struct net *net) { - /* Register table */ + struct ipt_replace *repl; + + repl = ipt_alloc_initial_table(&packet_filter); + if (repl == NULL) + return -ENOMEM; + /* Entry 1 is the FORWARD hook */ + ((struct ipt_standard *)repl->entries)[1].target.verdict = + -forward - 1; + net->ipv4.iptable_filter = - ipt_register_table(net, &packet_filter, &initial_table.repl); + ipt_register_table(net, &packet_filter, repl); + kfree(repl); if (IS_ERR(net->ipv4.iptable_filter)) return PTR_ERR(net->ipv4.iptable_filter); return 0; @@ -138,7 +75,7 @@ static int __net_init iptable_filter_net_init(struct net *net) static void __net_exit iptable_filter_net_exit(struct net *net) { - ipt_unregister_table(net->ipv4.iptable_filter); + ipt_unregister_table(net, net->ipv4.iptable_filter); } static struct pernet_operations iptable_filter_net_ops = { @@ -155,17 +92,16 @@ static int __init iptable_filter_init(void) return -EINVAL; } - /* Entry 1 is the FORWARD hook */ - initial_table.entries[1].target.verdict = -forward - 1; - ret = register_pernet_subsys(&iptable_filter_net_ops); if (ret < 0) return ret; /* Register hooks */ - ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); - if (ret < 0) + filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook); + if (IS_ERR(filter_ops)) { + ret = PTR_ERR(filter_ops); goto cleanup_table; + } return ret; @@ -176,7 +112,7 @@ static int __init iptable_filter_init(void) static void __exit iptable_filter_fini(void) { - nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); + xt_hook_unlink(&packet_filter, filter_ops); unregister_pernet_subsys(&iptable_filter_net_ops); } diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index fae78c3076c4..b9b83464cbf4 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -27,101 +27,16 @@ MODULE_DESCRIPTION("iptables mangle table"); (1 << NF_INET_LOCAL_OUT) | \ (1 << NF_INET_POST_ROUTING)) -/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */ -static const struct -{ - struct ipt_replace repl; - struct ipt_standard entries[5]; - struct ipt_error term; -} initial_table __net_initdata = { - .repl = { - .name = "mangle", - .valid_hooks = MANGLE_VALID_HOOKS, - .num_entries = 6, - .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error), - .hook_entry = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard), - [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2, - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, - [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4, - }, - .underflow = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard), - [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2, - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, - [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4, - }, - }, - .entries = { - IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ - }, - .term = IPT_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_mangler = { .name = "mangle", .valid_hooks = MANGLE_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, + .priority = NF_IP_PRI_MANGLE, }; -/* The work comes in here from netfilter.c. 
*/ -static unsigned int -ipt_pre_routing_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_mangle); -} - -static unsigned int -ipt_post_routing_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(out)->ipv4.iptable_mangle); -} - -static unsigned int -ipt_local_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_mangle); -} - -static unsigned int -ipt_forward_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_mangle); -} - static unsigned int -ipt_local_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) { unsigned int ret; const struct iphdr *iph; @@ -141,7 +56,7 @@ ipt_local_hook(unsigned int hook, daddr = iph->daddr; tos = iph->tos; - ret = ipt_do_table(skb, hook, in, out, + ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, dev_net(out)->ipv4.iptable_mangle); /* Reroute for ANY change. */ if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { @@ -158,49 +73,36 @@ ipt_local_hook(unsigned int hook, return ret; } -static struct nf_hook_ops ipt_ops[] __read_mostly = { - { - .hook = ipt_pre_routing_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP_PRI_MANGLE, - }, - { - .hook = ipt_local_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_MANGLE, - }, - { - .hook = ipt_forward_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP_PRI_MANGLE, - }, - { - .hook = ipt_local_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP_PRI_MANGLE, - }, - { - .hook = ipt_post_routing_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_MANGLE, - }, -}; +/* The work comes in here from netfilter.c. 
*/ +static unsigned int +iptable_mangle_hook(unsigned int hook, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + if (hook == NF_INET_LOCAL_OUT) + return ipt_mangle_out(skb, out); + if (hook == NF_INET_POST_ROUTING) + return ipt_do_table(skb, hook, in, out, + dev_net(out)->ipv4.iptable_mangle); + /* PREROUTING/INPUT/FORWARD: */ + return ipt_do_table(skb, hook, in, out, + dev_net(in)->ipv4.iptable_mangle); +} + +static struct nf_hook_ops *mangle_ops __read_mostly; static int __net_init iptable_mangle_net_init(struct net *net) { - /* Register table */ + struct ipt_replace *repl; + + repl = ipt_alloc_initial_table(&packet_mangler); + if (repl == NULL) + return -ENOMEM; net->ipv4.iptable_mangle = - ipt_register_table(net, &packet_mangler, &initial_table.repl); + ipt_register_table(net, &packet_mangler, repl); + kfree(repl); if (IS_ERR(net->ipv4.iptable_mangle)) return PTR_ERR(net->ipv4.iptable_mangle); return 0; @@ -208,7 +110,7 @@ static int __net_init iptable_mangle_net_init(struct net *net) static void __net_exit iptable_mangle_net_exit(struct net *net) { - ipt_unregister_table(net->ipv4.iptable_mangle); + ipt_unregister_table(net, net->ipv4.iptable_mangle); } static struct pernet_operations iptable_mangle_net_ops = { @@ -225,9 +127,11 @@ static int __init iptable_mangle_init(void) return ret; /* Register hooks */ - ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); - if (ret < 0) + mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook); + if (IS_ERR(mangle_ops)) { + ret = PTR_ERR(mangle_ops); goto cleanup_table; + } return ret; @@ -238,7 +142,7 @@ static int __init iptable_mangle_init(void) static void __exit iptable_mangle_fini(void) { - nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); + xt_hook_unlink(&packet_mangler, mangle_ops); unregister_pernet_subsys(&iptable_mangle_net_ops); } diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 993edc23be09..06fb9d11953c 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -9,90 +9,44 @@ #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) -static const struct -{ - struct ipt_replace repl; - struct ipt_standard entries[2]; - struct ipt_error term; -} initial_table __net_initdata = { - .repl = { - .name = "raw", - .valid_hooks = RAW_VALID_HOOKS, - .num_entries = 3, - .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), - .hook_entry = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) - }, - .underflow = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) - }, - }, - .entries = { - IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IPT_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_raw = { .name = "raw", .valid_hooks = RAW_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, + .priority = NF_IP_PRI_RAW, }; /* The work comes in here from netfilter.c. 
*/ static unsigned int -ipt_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +iptable_raw_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_raw); -} + const struct net *net; -static unsigned int -ipt_local_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) + if (hook == NF_INET_LOCAL_OUT && + (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr))) + /* root is playing with raw sockets. */ return NF_ACCEPT; - return ipt_do_table(skb, hook, in, out, - dev_net(out)->ipv4.iptable_raw); + + net = dev_net((in != NULL) ? in : out); + return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw); } -/* 'raw' is the very first table. */ -static struct nf_hook_ops ipt_ops[] __read_mostly = { - { - .hook = ipt_hook, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP_PRI_RAW, - .owner = THIS_MODULE, - }, - { - .hook = ipt_local_hook, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP_PRI_RAW, - .owner = THIS_MODULE, - }, -}; +static struct nf_hook_ops *rawtable_ops __read_mostly; static int __net_init iptable_raw_net_init(struct net *net) { - /* Register table */ + struct ipt_replace *repl; + + repl = ipt_alloc_initial_table(&packet_raw); + if (repl == NULL) + return -ENOMEM; net->ipv4.iptable_raw = - ipt_register_table(net, &packet_raw, &initial_table.repl); + ipt_register_table(net, &packet_raw, repl); + kfree(repl); if (IS_ERR(net->ipv4.iptable_raw)) return PTR_ERR(net->ipv4.iptable_raw); return 0; @@ -100,7 +54,7 @@ static int __net_init iptable_raw_net_init(struct net *net) static void __net_exit iptable_raw_net_exit(struct net *net) { - ipt_unregister_table(net->ipv4.iptable_raw); + ipt_unregister_table(net, net->ipv4.iptable_raw); } static struct pernet_operations iptable_raw_net_ops = { @@ -117,9 +71,11 @@ static int __init iptable_raw_init(void) return ret; /* Register hooks */ - ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); - if (ret < 0) + rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook); + if (IS_ERR(rawtable_ops)) { + ret = PTR_ERR(rawtable_ops); goto cleanup_table; + } return ret; @@ -130,7 +86,7 @@ static int __init iptable_raw_init(void) static void __exit iptable_raw_fini(void) { - nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); + xt_hook_unlink(&packet_raw, rawtable_ops); unregister_pernet_subsys(&iptable_raw_net_ops); } diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index 3bd3d6388da5..cce2f64e6f21 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c @@ -27,109 +27,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules"); (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) -static const struct -{ - struct ipt_replace repl; - struct ipt_standard entries[3]; - struct ipt_error term; -} initial_table __net_initdata = { - .repl = { - .name = "security", - .valid_hooks = SECURITY_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), - .hook_entry = { - [NF_INET_LOCAL_IN] = 0, - 
[NF_INET_FORWARD] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, - }, - .underflow = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2, - }, - }, - .entries = { - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IPT_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table security_table = { .name = "security", .valid_hooks = SECURITY_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, + .priority = NF_IP_PRI_SECURITY, }; static unsigned int -ipt_local_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_security); -} - -static unsigned int -ipt_forward_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +iptable_security_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return ipt_do_table(skb, hook, in, out, - dev_net(in)->ipv4.iptable_security); -} + const struct net *net; -static unsigned int -ipt_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* Somebody is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) + if (hook == NF_INET_LOCAL_OUT && + (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr))) + /* Somebody is playing with raw sockets. */ return NF_ACCEPT; - return ipt_do_table(skb, hook, in, out, - dev_net(out)->ipv4.iptable_security); + + net = dev_net((in != NULL) ? 
in : out); + return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security); } -static struct nf_hook_ops ipt_ops[] __read_mostly = { - { - .hook = ipt_local_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_SECURITY, - }, - { - .hook = ipt_forward_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP_PRI_SECURITY, - }, - { - .hook = ipt_local_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP_PRI_SECURITY, - }, -}; +static struct nf_hook_ops *sectbl_ops __read_mostly; static int __net_init iptable_security_net_init(struct net *net) { - net->ipv4.iptable_security = - ipt_register_table(net, &security_table, &initial_table.repl); + struct ipt_replace *repl; + repl = ipt_alloc_initial_table(&security_table); + if (repl == NULL) + return -ENOMEM; + net->ipv4.iptable_security = + ipt_register_table(net, &security_table, repl); + kfree(repl); if (IS_ERR(net->ipv4.iptable_security)) return PTR_ERR(net->ipv4.iptable_security); @@ -138,7 +73,7 @@ static int __net_init iptable_security_net_init(struct net *net) static void __net_exit iptable_security_net_exit(struct net *net) { - ipt_unregister_table(net->ipv4.iptable_security); + ipt_unregister_table(net, net->ipv4.iptable_security); } static struct pernet_operations iptable_security_net_ops = { @@ -154,9 +89,11 @@ static int __init iptable_security_init(void) if (ret < 0) return ret; - ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); - if (ret < 0) + sectbl_ops = xt_hook_link(&security_table, iptable_security_hook); + if (IS_ERR(sectbl_ops)) { + ret = PTR_ERR(sectbl_ops); goto cleanup_table; + } return ret; @@ -167,7 +104,7 @@ cleanup_table: static void __exit iptable_security_fini(void) { - nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); + xt_hook_unlink(&security_table, sectbl_ops); unregister_pernet_subsys(&iptable_security_net_ops); } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index d1ea38a7c490..2bb1f87051c4 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -22,6 +22,7 @@ #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_l3proto.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> #include <net/netfilter/nf_nat_helper.h> @@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) return -EINVAL; } - h = nf_conntrack_find_get(sock_net(sk), &tuple); + h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple); if (h) { struct sockaddr_in sin; struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 7afd39b5b781..7404bde95994 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -18,6 +18,7 @@ #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_log.h> static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; @@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, /* Returns 
conntrack if it dealt with ICMP, and filled in skb fields */ static int -icmp_error_message(struct net *net, struct sk_buff *skb, +icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, enum ip_conntrack_info *ctinfo, unsigned int hooknum) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_l4proto *innerproto; const struct nf_conntrack_tuple_hash *h; + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; NF_CT_ASSERT(skb->nfct == NULL); @@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb, *ctinfo = IP_CT_RELATED; - h = nf_conntrack_find_get(net, &innertuple); + h = nf_conntrack_find_get(net, zone, &innertuple); if (!h) { pr_debug("icmp_error_message: no match\n"); return -NF_ACCEPT; @@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb, /* Small and modified version of icmp_rcv */ static int -icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, +icmp_error(struct net *net, struct nf_conn *tmpl, + struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { const struct icmphdr *icmph; @@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, icmph->type != ICMP_REDIRECT) return NF_ACCEPT; - return icmp_error_message(net, skb, ctinfo, hooknum); + return icmp_error_message(net, tmpl, skb, ctinfo, hooknum); } #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 331ead3ebd1b..d498a704d456 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -16,7 +16,9 @@ #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> +#include <net/netfilter/nf_conntrack.h> /* Returns new sk_buff, or NULL */ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) @@ -38,15 +40,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, struct sk_buff *skb) { + u16 zone = NF_CT_DEFAULT_ZONE; + + if (skb->nfct) + zone = nf_ct_zone((struct nf_conn *)skb->nfct); + #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) - return IP_DEFRAG_CONNTRACK_BRIDGE_IN; + return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone; #endif if (hooknum == NF_INET_PRE_ROUTING) - return IP_DEFRAG_CONNTRACK_IN; + return IP_DEFRAG_CONNTRACK_IN + zone; else - return IP_DEFRAG_CONNTRACK_OUT; + return IP_DEFRAG_CONNTRACK_OUT + zone; } static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, @@ -59,7 +66,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, #if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) /* Previously seen (loopback)? Ignore. Do this before fragment check. 
*/ - if (skb->nfct) + if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) return NF_ACCEPT; #endif #endif diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 26066a2327ad..4595281c2863 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -30,6 +30,7 @@ #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> +#include <net/netfilter/nf_conntrack_zones.h> static DEFINE_SPINLOCK(nf_nat_lock); @@ -69,13 +70,14 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int -hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) +hash_by_src(const struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { unsigned int hash; /* Original src, to ensure we map it consistently if poss. */ hash = jhash_3words((__force u32)tuple->src.u3.ip, - (__force u32)tuple->src.u.all, + (__force u32)tuple->src.u.all ^ zone, tuple->dst.protonum, 0); return ((u64)hash * net->ipv4.nat_htable_size) >> 32; } @@ -139,12 +141,12 @@ same_src(const struct nf_conn *ct, /* Only called for SRC manip */ static int -find_appropriate_src(struct net *net, +find_appropriate_src(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *result, const struct nf_nat_range *range) { - unsigned int h = hash_by_src(net, tuple); + unsigned int h = hash_by_src(net, zone, tuple); const struct nf_conn_nat *nat; const struct nf_conn *ct; const struct hlist_node *n; @@ -152,7 +154,7 @@ find_appropriate_src(struct net *net, rcu_read_lock(); hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) { ct = nat->ct; - if (same_src(ct, tuple)) { + if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) { /* Copy source part from reply tuple. */ nf_ct_invert_tuplepr(result, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); @@ -175,7 +177,7 @@ find_appropriate_src(struct net *net, the ip with the lowest src-ip/dst-ip/proto usage. */ static void -find_best_ips_proto(struct nf_conntrack_tuple *tuple, +find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, const struct nf_conn *ct, enum nf_nat_manip_type maniptype) @@ -209,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple, maxip = ntohl(range->max_ip); j = jhash_2words((__force u32)tuple->src.u3.ip, range->flags & IP_NAT_RANGE_PERSISTENT ? - 0 : (__force u32)tuple->dst.u3.ip, 0); + 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0); j = ((u64)j * (maxip - minip + 1)) >> 32; *var_ipp = htonl(minip + j); } @@ -229,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, { struct net *net = nf_ct_net(ct); const struct nf_nat_protocol *proto; + u16 zone = nf_ct_zone(ct); /* 1) If this srcip/proto/src-proto-part is currently mapped, and that same mapping gives a unique tuple within the given @@ -239,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, manips not an issue. */ if (maniptype == IP_NAT_MANIP_SRC && !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { - if (find_appropriate_src(net, orig_tuple, tuple, range)) { + if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) { pr_debug("get_unique_tuple: Found current src map\n"); if (!nf_nat_used_tuple(tuple, ct)) return; @@ -249,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, /* 2) Select the least-used IP/proto combination in the given range. 
*/ *tuple = *orig_tuple; - find_best_ips_proto(tuple, range, ct, maniptype); + find_best_ips_proto(zone, tuple, range, ct, maniptype); /* 3) The per-protocol part of the manip is made to map into the range to make a unique tuple. */ @@ -327,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct, if (have_to_hash) { unsigned int srchash; - srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + srchash = hash_by_src(net, nf_ct_zone(ct), + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); spin_lock_bh(&nf_nat_lock); /* nf_conntrack_alter_reply might re-allocate exntension aera */ nat = nfct_nat(ct); diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c index a1d5d58a58bf..86e0e84ff0a0 100644 --- a/net/ipv4/netfilter/nf_nat_ftp.c +++ b/net/ipv4/netfilter/nf_nat_ftp.c @@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp"); /* FIXME: Time out? --RR */ -static int -mangle_rfc959_packet(struct sk_buff *skb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) +static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type, + char *buffer, size_t buflen, + __be32 addr, u16 port) { - char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; - - sprintf(buffer, "%u,%u,%u,%u,%u,%u", - NIPQUAD(newip), port>>8, port&0xFF); - - pr_debug("calling nf_nat_mangle_tcp_packet\n"); - - return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); -} - -/* |1|132.235.1.2|6275| */ -static int -mangle_eprt_packet(struct sk_buff *skb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ - char buffer[sizeof("|1|255.255.255.255|65535|")]; - - sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); - - pr_debug("calling nf_nat_mangle_tcp_packet\n"); - - return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); -} - -/* |1|132.235.1.2|6275| */ -static int -mangle_epsv_packet(struct sk_buff *skb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) -{ - char buffer[sizeof("|||65535|")]; - - sprintf(buffer, "|||%u|", port); - - pr_debug("calling nf_nat_mangle_tcp_packet\n"); + switch (type) { + case NF_CT_FTP_PORT: + case NF_CT_FTP_PASV: + return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u", + ((unsigned char *)&addr)[0], + ((unsigned char *)&addr)[1], + ((unsigned char *)&addr)[2], + ((unsigned char *)&addr)[3], + port >> 8, + port & 0xFF); + case NF_CT_FTP_EPRT: + return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port); + case NF_CT_FTP_EPSV: + return snprintf(buffer, buflen, "|||%u|", port); + } - return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); + return 0; } -static int (*mangle[])(struct sk_buff *, __be32, u_int16_t, - unsigned int, unsigned int, struct nf_conn *, - enum ip_conntrack_info) -= { - [NF_CT_FTP_PORT] = mangle_rfc959_packet, - [NF_CT_FTP_PASV] = mangle_rfc959_packet, - [NF_CT_FTP_EPRT] = mangle_eprt_packet, - [NF_CT_FTP_EPSV] = mangle_epsv_packet -}; - /* So, this packet has hit the connection tracking matching code. Mangle it, and change the expectation to match the new version. 
*/ static unsigned int nf_nat_ftp(struct sk_buff *skb, @@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb, u_int16_t port; int dir = CTINFO2DIR(ctinfo); struct nf_conn *ct = exp->master; + char buffer[sizeof("|1|255.255.255.255|65535|")]; + unsigned int buflen; pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); @@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb, if (port == 0) return NF_DROP; - if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) { - nf_ct_unexpect_related(exp); - return NF_DROP; - } + buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port); + if (!buflen) + goto out; + + pr_debug("calling nf_nat_mangle_tcp_packet\n"); + + if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, + matchlen, buffer, buflen)) + goto out; + return NF_ACCEPT; + +out: + nf_ct_unexpect_related(exp); + return NF_DROP; } static void __exit nf_nat_ftp_fini(void) diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 7f10a6be0191..4b6af4bb1f50 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c @@ -141,6 +141,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra) return 1; } +void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo, + __be32 seq, s16 off) +{ + if (!off) + return; + set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); + adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo); + nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); +} +EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); + /* Generic function for mangling variable-length address changes inside * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX * command in FTP). @@ -149,14 +160,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra) * skb enlargement, ... 
* * */ -int -nf_nat_mangle_tcp_packet(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len) +int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int match_offset, + unsigned int match_len, + const char *rep_buffer, + unsigned int rep_len, bool adjust) { struct rtable *rt = skb_rtable(skb); struct iphdr *iph; @@ -202,16 +212,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, inet_proto_csum_replace2(&tcph->check, skb, htons(oldlen), htons(datalen), 1); - if (rep_len != match_len) { - set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); - adjust_tcp_sequence(ntohl(tcph->seq), - (int)rep_len - (int)match_len, - ct, ctinfo); - nf_conntrack_event_cache(IPCT_NATSEQADJ, ct); - } + if (adjust && rep_len != match_len) + nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq, + (int)rep_len - (int)match_len); + return 1; } -EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); +EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet); /* Generic function for mangling variable-length address changes inside * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 9eb171056c63..4c060038d29f 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -25,6 +25,7 @@ #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> @@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct, pr_debug("trying to unexpect other dir: "); nf_ct_dump_tuple_ip(&t); - other_exp = nf_ct_expect_find_get(net, &t); + other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t); if (other_exp) { nf_ct_unexpect_related(other_exp); nf_ct_expect_put(other_exp); diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 9e81e0dfb4ec..ab74cc0535e2 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -28,36 +28,6 @@ (1 << NF_INET_POST_ROUTING) | \ (1 << NF_INET_LOCAL_OUT)) -static const struct -{ - struct ipt_replace repl; - struct ipt_standard entries[3]; - struct ipt_error term; -} nat_initial_table __net_initdata = { - .repl = { - .name = "nat", - .valid_hooks = NAT_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), - .hook_entry = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 - }, - .underflow = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 - }, - }, - .entries = { - IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ - IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ - IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IPT_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table nat_table = { .name = "nat", .valid_hooks = NAT_VALID_HOOKS, @@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = { static int __net_init nf_nat_rule_net_init(struct net *net) { - net->ipv4.nat_table = ipt_register_table(net, &nat_table, - &nat_initial_table.repl); + struct ipt_replace *repl; + + repl = 
ipt_alloc_initial_table(&nat_table); + if (repl == NULL) + return -ENOMEM; + net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl); + kfree(repl); if (IS_ERR(net->ipv4.nat_table)) return PTR_ERR(net->ipv4.nat_table); return 0; @@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net) static void __net_exit nf_nat_rule_net_exit(struct net *net) { - ipt_unregister_table(net->ipv4.nat_table); + ipt_unregister_table(net, net->ipv4.nat_table); } static struct pernet_operations nf_nat_rule_net_ops = { diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 07d61a57613c..11b538deaaec 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -1,4 +1,4 @@ -/* SIP extension for UDP NAT alteration. +/* SIP extension for NAT alteration. * * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> * based on RR's ip_nat_ftp.c and other modules. @@ -15,6 +15,7 @@ #include <linux/ip.h> #include <net/ip.h> #include <linux/udp.h> +#include <linux/tcp.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> @@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper"); MODULE_ALIAS("ip_nat_sip"); -static unsigned int mangle_packet(struct sk_buff *skb, +static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, const char *buffer, unsigned int buflen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - - if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen, - buffer, buflen)) - return 0; + struct tcphdr *th; + unsigned int baseoff; + + if (nf_ct_protonum(ct) == IPPROTO_TCP) { + th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); + baseoff = ip_hdrlen(skb) + th->doff * 4; + matchoff += dataoff - baseoff; + + if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, + matchoff, matchlen, + buffer, buflen, false)) + return 0; + } else { + baseoff = ip_hdrlen(skb) + sizeof(struct udphdr); + matchoff += dataoff - baseoff; + + if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, + matchoff, matchlen, + buffer, buflen)) + return 0; + } /* Reload data pointer and adjust datalen value */ - *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); + *dptr = skb->data + dataoff; *datalen += buflen - matchlen; return 1; } -static int map_addr(struct sk_buff *skb, +static int map_addr(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, union nf_inet_addr *addr, __be16 port) @@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb, buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); - return mangle_packet(skb, dptr, datalen, matchoff, matchlen, + return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } -static int map_sip_addr(struct sk_buff *skb, +static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, enum sip_header_types type) { @@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb, if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, &matchoff, &matchlen, &addr, &port) <= 0) return 1; - return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port); + return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, + &addr, port); } -static unsigned int ip_nat_sip(struct sk_buff *skb, +static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, const char 
**dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - unsigned int dataoff, matchoff, matchlen; + unsigned int coff, matchoff, matchlen; + enum sip_header_types hdr; union nf_inet_addr addr; __be16 port; int request, in_header; @@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, if (ct_sip_parse_request(ct, *dptr, *datalen, &matchoff, &matchlen, &addr, &port) > 0 && - !map_addr(skb, dptr, datalen, matchoff, matchlen, + !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; request = 1; } else request = 0; + if (nf_ct_protonum(ct) == IPPROTO_TCP) + hdr = SIP_HDR_VIA_TCP; + else + hdr = SIP_HDR_VIA_UDP; + /* Translate topmost Via header and parameters */ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, - SIP_HDR_VIA, NULL, &matchoff, &matchlen, + hdr, NULL, &matchoff, &matchlen, &addr, &port) > 0) { unsigned int matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; @@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, goto next; } - if (!map_addr(skb, dptr, datalen, matchoff, matchlen, + if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; @@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { buflen = sprintf(buffer, "%pI4", &ct->tuplehash[!dir].tuple.dst.u3.ip); - if (!mangle_packet(skb, dptr, datalen, poff, plen, - buffer, buflen)) + if (!mangle_packet(skb, dataoff, dptr, datalen, + poff, plen, buffer, buflen)) return NF_DROP; } @@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { buflen = sprintf(buffer, "%pI4", &ct->tuplehash[!dir].tuple.src.u3.ip); - if (!mangle_packet(skb, dptr, datalen, poff, plen, - buffer, buflen)) + if (!mangle_packet(skb, dataoff, dptr, datalen, + poff, plen, buffer, buflen)) return NF_DROP; } @@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; buflen = sprintf(buffer, "%u", ntohs(p)); - if (!mangle_packet(skb, dptr, datalen, poff, plen, - buffer, buflen)) + if (!mangle_packet(skb, dataoff, dptr, datalen, + poff, plen, buffer, buflen)) return NF_DROP; } } next: /* Translate Contact headers */ - dataoff = 0; + coff = 0; in_header = 0; - while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, + while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, SIP_HDR_CONTACT, &in_header, &matchoff, &matchlen, &addr, &port) > 0) { - if (!map_addr(skb, dptr, datalen, matchoff, matchlen, + if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; } - if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) || - !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO)) + if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) || + !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO)) return NF_DROP; + return NF_ACCEPT; } +static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off) +{ + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + const struct tcphdr *th; + + if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0) + return; + + th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); + nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off); +} + /* Handles expected signalling connections and media streams */ static 
void ip_nat_sip_expected(struct nf_conn *ct, struct nf_conntrack_expect *exp) @@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct, } } -static unsigned int ip_nat_sip_expect(struct sk_buff *skb, +static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *exp, unsigned int matchoff, @@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, if (exp->tuple.dst.u3.ip != exp->saved_ip || exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { buflen = sprintf(buffer, "%pI4:%u", &newip, port); - if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, - buffer, buflen)) + if (!mangle_packet(skb, dataoff, dptr, datalen, + matchoff, matchlen, buffer, buflen)) goto err; } return NF_ACCEPT; @@ -290,7 +329,7 @@ err: return NF_DROP; } -static int mangle_content_len(struct sk_buff *skb, +static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; @@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb, return 0; buflen = sprintf(buffer, "%u", c_len); - return mangle_packet(skb, dptr, datalen, matchoff, matchlen, + return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } -static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr, - unsigned int dataoff, unsigned int *datalen, +static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, char *buffer, int buflen) @@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr, struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchlen, matchoff; - if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term, + if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term, &matchoff, &matchlen) <= 0) return -ENOENT; - return mangle_packet(skb, dptr, datalen, matchoff, matchlen, + return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen) ? 
0 : -EINVAL; } -static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, - unsigned int dataoff, - unsigned int *datalen, +static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, const union nf_inet_addr *addr) @@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, unsigned int buflen; buflen = sprintf(buffer, "%pI4", &addr->ip); - if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, + if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term, buffer, buflen)) return 0; - return mangle_content_len(skb, dptr, datalen); + return mangle_content_len(skb, dataoff, dptr, datalen); } -static unsigned int ip_nat_sdp_port(struct sk_buff *skb, - const char **dptr, - unsigned int *datalen, +static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, u_int16_t port) @@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int buflen; buflen = sprintf(buffer, "%u", port); - if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, + if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen)) return 0; - return mangle_content_len(skb, dptr, datalen); + return mangle_content_len(skb, dataoff, dptr, datalen); } -static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, - unsigned int dataoff, - unsigned int *datalen, +static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int sdpoff, const union nf_inet_addr *addr) { char buffer[sizeof("nnn.nnn.nnn.nnn")]; @@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, /* Mangle session description owner and contact addresses */ buflen = sprintf(buffer, "%pI4", &addr->ip); - if (mangle_sdp_packet(skb, dptr, dataoff, datalen, + if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, buffer, buflen)) return 0; - switch (mangle_sdp_packet(skb, dptr, dataoff, datalen, + switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, buffer, buflen)) { case 0: @@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, return 0; } - return mangle_content_len(skb, dptr, datalen); + return mangle_content_len(skb, dataoff, dptr, datalen); } /* So, this packet has hit the connection tracking matching code. Mangle it, and change the expectation to match the new version. */ -static unsigned int ip_nat_sdp_media(struct sk_buff *skb, - const char **dptr, - unsigned int *datalen, +static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *rtp_exp, struct nf_conntrack_expect *rtcp_exp, unsigned int mediaoff, @@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, /* Update media port. 
*/ if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && - !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port)) + !ip_nat_sdp_port(skb, dataoff, dptr, datalen, + mediaoff, medialen, port)) goto err2; return NF_ACCEPT; @@ -471,6 +510,7 @@ err1: static void __exit nf_nat_sip_fini(void) { rcu_assign_pointer(nf_nat_sip_hook, NULL); + rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL); rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); @@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void) static int __init nf_nat_sip_init(void) { BUG_ON(nf_nat_sip_hook != NULL); + BUG_ON(nf_nat_sip_seq_adjust_hook != NULL); BUG_ON(nf_nat_sip_expect_hook != NULL); BUG_ON(nf_nat_sdp_addr_hook != NULL); BUG_ON(nf_nat_sdp_port_hook != NULL); BUG_ON(nf_nat_sdp_session_hook != NULL); BUG_ON(nf_nat_sdp_media_hook != NULL); rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); + rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index d9521f6f9ed0..0b9c7ce3d6c5 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1038,7 +1038,7 @@ static int snmp_parse_mangle(unsigned char *msg, unsigned int cls, con, tag, vers, pdutype; struct asn1_ctx ctx; struct asn1_octstr comm; - struct snmp_object **obj; + struct snmp_object *obj; if (debug > 1) hex_dump(msg, len); @@ -1148,43 +1148,34 @@ static int snmp_parse_mangle(unsigned char *msg, if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) return 0; - obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); - if (obj == NULL) { - if (net_ratelimit()) - printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); - return 0; - } - while (!asn1_eoc_decode(&ctx, eoc)) { unsigned int i; - if (!snmp_object_decode(&ctx, obj)) { - if (*obj) { - kfree((*obj)->id); - kfree(*obj); + if (!snmp_object_decode(&ctx, &obj)) { + if (obj) { + kfree(obj->id); + kfree(obj); } - kfree(obj); return 0; } if (debug > 1) { printk(KERN_DEBUG "bsalg: object: "); - for (i = 0; i < (*obj)->id_len; i++) { + for (i = 0; i < obj->id_len; i++) { if (i > 0) printk("."); - printk("%lu", (*obj)->id[i]); + printk("%lu", obj->id[i]); } - printk(": type=%u\n", (*obj)->type); + printk(": type=%u\n", obj->type); } - if ((*obj)->type == SNMP_IPADDR) + if (obj->type == SNMP_IPADDR) mangle_address(ctx.begin, ctx.pointer - 4 , map, check); - kfree((*obj)->id); - kfree(*obj); + kfree(obj->id); + kfree(obj); } - kfree(obj); if (!asn1_eoc_decode(&ctx, eoc)) return 0; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 8a7e0f52e177..4185099c2943 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -29,6 +29,7 @@ #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> +#include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); @@ -67,6 +68,12 @@ do { \ #define inline #endif +void *ip6t_alloc_initial_table(const struct xt_table *info) +{ + return xt_alloc_initial_table(ip6t, IP6T); +} +EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); + /* We keep a set of rules for each CPU, so we can 
avoid write-locking them in the softirq when updating the counters and therefore @@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par) /* Performance critical - called for every packet */ static inline bool -do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, +do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb, struct xt_match_param *par) { par->match = m->u.kernel.match; @@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, } static inline struct ip6t_entry * -get_entry(void *base, unsigned int offset) +get_entry(const void *base, unsigned int offset) { return (struct ip6t_entry *)(base + offset); } @@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6) return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; } +static inline const struct ip6t_entry_target * +ip6t_get_target_c(const struct ip6t_entry *e) +{ + return ip6t_get_target((struct ip6t_entry *)e); +} + #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) /* This cries for unification! */ @@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = { /* Mildly perf critical (only if packet tracing is on) */ static inline int -get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, +get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { - struct ip6t_standard_target *t = (void *)ip6t_get_target(s); + const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ @@ -294,15 +307,15 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, return 0; } -static void trace_packet(struct sk_buff *skb, +static void trace_packet(const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, - struct xt_table_info *private, - struct ip6t_entry *e) + const struct xt_table_info *private, + const struct ip6t_entry *e) { - void *table_base; + const void *table_base; const struct ip6t_entry *root; const char *hookname, *chainname, *comment; unsigned int rulenum = 0; @@ -345,9 +358,9 @@ ip6t_do_table(struct sk_buff *skb, /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; - void *table_base; + const void *table_base; struct ip6t_entry *e, *back; - struct xt_table_info *private; + const struct xt_table_info *private; struct xt_match_param mtpar; struct xt_target_param tgpar; @@ -378,7 +391,7 @@ ip6t_do_table(struct sk_buff *skb, back = get_entry(table_base, private->underflow[hook]); do { - struct ip6t_entry_target *t; + const struct ip6t_entry_target *t; IP_NF_ASSERT(e); IP_NF_ASSERT(back); @@ -393,7 +406,7 @@ ip6t_do_table(struct sk_buff *skb, ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr), 1); - t = ip6t_get_target(e); + t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ @@ -475,7 +488,7 @@ ip6t_do_table(struct sk_buff *skb, /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. 
*/ static int -mark_source_chains(struct xt_table_info *newinfo, +mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; @@ -493,8 +506,8 @@ mark_source_chains(struct xt_table_info *newinfo, e->counters.pcnt = pos; for (;;) { - struct ip6t_standard_target *t - = (void *)ip6t_get_target(e); + const struct ip6t_standard_target *t + = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { @@ -585,13 +598,14 @@ mark_source_chains(struct xt_table_info *newinfo, } static int -cleanup_match(struct ip6t_entry_match *m, unsigned int *i) +cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; + par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; @@ -602,9 +616,9 @@ cleanup_match(struct ip6t_entry_match *m, unsigned int *i) } static int -check_entry(struct ip6t_entry *e, const char *name) +check_entry(const struct ip6t_entry *e, const char *name) { - struct ip6t_entry_target *t; + const struct ip6t_entry_target *t; if (!ip6_checkentry(&e->ipv6)) { duprintf("ip_tables: ip check failed %p %s.\n", e, name); @@ -615,7 +629,7 @@ check_entry(struct ip6t_entry *e, const char *name) e->next_offset) return -EINVAL; - t = ip6t_get_target(e); + t = ip6t_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; @@ -668,10 +682,11 @@ err: return ret; } -static int check_target(struct ip6t_entry *e, const char *name) +static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct ip6t_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { + .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, @@ -693,8 +708,8 @@ static int check_target(struct ip6t_entry *e, const char *name) } static int -find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, - unsigned int *i) +find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, + unsigned int size, unsigned int *i) { struct ip6t_entry_target *t; struct xt_target *target; @@ -707,6 +722,7 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, return ret; j = 0; + mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; @@ -727,7 +743,7 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, } t->u.kernel.target = target; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto err; @@ -736,18 +752,18 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, err: module_put(t->u.kernel.target->me); cleanup_matches: - IP6T_MATCH_ITERATE(e, cleanup_match, &j); + IP6T_MATCH_ITERATE(e, cleanup_match, net, &j); return ret; } -static bool check_underflow(struct ip6t_entry *e) +static bool check_underflow(const struct ip6t_entry *e) { const struct ip6t_entry_target *t; unsigned int verdict; if (!unconditional(&e->ipv6)) return false; - t = ip6t_get_target(e); + t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct ip6t_standard_target *)t)->verdict; @@ -758,8 +774,8 @@ static bool check_underflow(struct ip6t_entry *e) static int check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, - unsigned char *base, - unsigned char *limit, + const unsigned char *base, + const unsigned char *limit, const 
unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks, @@ -806,7 +822,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e, } static int -cleanup_entry(struct ip6t_entry *e, unsigned int *i) +cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; struct ip6t_entry_target *t; @@ -815,9 +831,10 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i) return 1; /* Cleanup all matches */ - IP6T_MATCH_ITERATE(e, cleanup_match, NULL); + IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL); t = ip6t_get_target(e); + par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV6; @@ -830,7 +847,8 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i) /* Checks and translates the user-supplied table segment (held in newinfo) */ static int -translate_table(const char *name, +translate_table(struct net *net, + const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, void *entry0, @@ -892,11 +910,11 @@ translate_table(const char *name, /* Finally, each sanity check must pass */ i = 0; ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, - find_check_entry, name, size, &i); + find_check_entry, net, name, size, &i); if (ret != 0) { IP6T_ENTRY_ITERATE(entry0, newinfo->size, - cleanup_entry, &i); + cleanup_entry, net, &i); return ret; } @@ -972,11 +990,11 @@ get_counters(const struct xt_table_info *t, local_bh_enable(); } -static struct xt_counters *alloc_counters(struct xt_table *table) +static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - struct xt_table_info *private = table->private; + const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -994,11 +1012,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) static int copy_entries_to_user(unsigned int total_size, - struct xt_table *table, + const struct xt_table *table, void __user *userptr) { unsigned int off, num; - struct ip6t_entry *e; + const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; @@ -1050,7 +1068,7 @@ copy_entries_to_user(unsigned int total_size, } } - t = ip6t_get_target(e); + t = ip6t_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct ip6t_entry_target, u.user.name), @@ -1067,7 +1085,7 @@ copy_entries_to_user(unsigned int total_size, } #ifdef CONFIG_COMPAT -static void compat_standard_from_user(void *dst, void *src) +static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; @@ -1076,7 +1094,7 @@ static void compat_standard_from_user(void *dst, void *src) memcpy(dst, &v, sizeof(v)); } -static int compat_standard_to_user(void __user *dst, void *src) +static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; @@ -1086,24 +1104,24 @@ static int compat_standard_to_user(void __user *dst, void *src) } static inline int -compat_calc_match(struct ip6t_entry_match *m, int *size) +compat_calc_match(const struct ip6t_entry_match *m, int *size) { *size += xt_compat_match_offset(m->u.kernel.match); return 0; } -static int compat_calc_entry(struct ip6t_entry *e, +static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, - void *base, struct xt_table_info *newinfo) + const void *base, struct xt_table_info 
*newinfo) { - struct ip6t_entry_target *t; + const struct ip6t_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; IP6T_MATCH_ITERATE(e, compat_calc_match, &off); - t = ip6t_get_target(e); + t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); @@ -1139,7 +1157,8 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, int *len, int compat) +static int get_info(struct net *net, void __user *user, + const int *len, int compat) { char name[IP6T_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -1199,7 +1218,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) } static int -get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len) +get_entries(struct net *net, struct ip6t_get_entries __user *uptr, + const int *len) { int ret; struct ip6t_get_entries get; @@ -1291,7 +1311,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, - NULL); + net, NULL); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) @@ -1310,7 +1330,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, void __user *user, unsigned int len) +do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct ip6t_replace tmp; @@ -1336,7 +1356,7 @@ do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_table(tmp.name, tmp.valid_hooks, + ret = translate_table(net, tmp.name, tmp.valid_hooks, newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) @@ -1351,7 +1371,7 @@ do_replace(struct net *net, void __user *user, unsigned int len) return 0; free_newinfo_untrans: - IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); + IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -1371,7 +1391,7 @@ add_counter_to_entry(struct ip6t_entry *e, } static int -do_add_counters(struct net *net, void __user *user, unsigned int len, +do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i, curcpu; @@ -1570,10 +1590,10 @@ static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, - unsigned char *base, - unsigned char *limit, - unsigned int *hook_entries, - unsigned int *underflows, + const unsigned char *base, + const unsigned char *limit, + const unsigned int *hook_entries, + const unsigned int *underflows, unsigned int *i, const char *name) { @@ -1690,14 +1710,15 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, return ret; } -static int compat_check_entry(struct ip6t_entry *e, const char *name, - unsigned int *i) +static int compat_check_entry(struct ip6t_entry *e, struct net *net, + const char *name, unsigned int *i) { unsigned int j; int ret; struct xt_mtchk_param mtpar; j = 0; + mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; 
mtpar.hook_mask = e->comefrom; @@ -1706,7 +1727,7 @@ static int compat_check_entry(struct ip6t_entry *e, const char *name, if (ret) goto cleanup_matches; - ret = check_target(e, name); + ret = check_target(e, net, name); if (ret) goto cleanup_matches; @@ -1714,12 +1735,13 @@ static int compat_check_entry(struct ip6t_entry *e, const char *name, return 0; cleanup_matches: - IP6T_MATCH_ITERATE(e, cleanup_match, &j); + IP6T_MATCH_ITERATE(e, cleanup_match, net, &j); return ret; } static int -translate_compat_table(const char *name, +translate_compat_table(struct net *net, + const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, @@ -1808,12 +1830,12 @@ translate_compat_table(const char *name, i = 0; ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, - name, &i); + net, name, &i); if (ret) { j -= i; COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, compat_release_entry, &j); - IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); + IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i); xt_free_table_info(newinfo); return ret; } @@ -1868,7 +1890,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) goto free_newinfo; } - ret = translate_compat_table(tmp.name, tmp.valid_hooks, + ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); @@ -1884,7 +1906,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return 0; free_newinfo_untrans: - IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); + IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; @@ -2121,7 +2143,7 @@ struct xt_table *ip6t_register_table(struct net *net, loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; memcpy(loc_cpu_entry, repl->entries, repl->size); - ret = translate_table(table->name, table->valid_hooks, + ret = translate_table(net, table->name, table->valid_hooks, newinfo, loc_cpu_entry, repl->size, repl->num_entries, repl->hook_entry, @@ -2142,7 +2164,7 @@ out: return ERR_PTR(ret); } -void ip6t_unregister_table(struct xt_table *table) +void ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; @@ -2152,7 +2174,7 @@ void ip6t_unregister_table(struct xt_table *table) /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries[raw_smp_processor_id()]; - IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); + IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index ad378efd0eb8..36b72cafc227 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c @@ -21,99 +21,26 @@ MODULE_DESCRIPTION("ip6tables filter table"); (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT)) -static struct -{ - struct ip6t_replace repl; - struct ip6t_standard entries[3]; - struct ip6t_error term; -} initial_table __net_initdata = { - .repl = { - .name = "filter", - .valid_hooks = FILTER_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), - .hook_entry = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ip6t_standard), - 
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 - }, - .underflow = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ip6t_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 - }, - }, - .entries = { - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IP6T_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_filter = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, + .priority = NF_IP6_PRI_FILTER, }; /* The work comes in here from netfilter.c. */ static unsigned int -ip6t_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ip6t_do_table(skb, hook, in, out, - dev_net(in)->ipv6.ip6table_filter); -} - -static unsigned int -ip6t_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +ip6table_filter_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) { -#if 0 - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) { - if (net_ratelimit()) - printk("ip6t_hook: happy cracking.\n"); - return NF_ACCEPT; - } -#endif + const struct net *net = dev_net((in != NULL) ? in : out); - return ip6t_do_table(skb, hook, in, out, - dev_net(out)->ipv6.ip6table_filter); + return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter); } -static struct nf_hook_ops ip6t_ops[] __read_mostly = { - { - .hook = ip6t_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP6_PRI_FILTER, - }, - { - .hook = ip6t_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP6_PRI_FILTER, - }, - { - .hook = ip6t_local_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP6_PRI_FILTER, - }, -}; +static struct nf_hook_ops *filter_ops __read_mostly; /* Default to forward because I got too much mail already. 
*/ static int forward = NF_ACCEPT; @@ -121,9 +48,18 @@ module_param(forward, bool, 0000); static int __net_init ip6table_filter_net_init(struct net *net) { - /* Register table */ + struct ip6t_replace *repl; + + repl = ip6t_alloc_initial_table(&packet_filter); + if (repl == NULL) + return -ENOMEM; + /* Entry 1 is the FORWARD hook */ + ((struct ip6t_standard *)repl->entries)[1].target.verdict = + -forward - 1; + net->ipv6.ip6table_filter = - ip6t_register_table(net, &packet_filter, &initial_table.repl); + ip6t_register_table(net, &packet_filter, repl); + kfree(repl); if (IS_ERR(net->ipv6.ip6table_filter)) return PTR_ERR(net->ipv6.ip6table_filter); return 0; @@ -131,7 +67,7 @@ static int __net_init ip6table_filter_net_init(struct net *net) static void __net_exit ip6table_filter_net_exit(struct net *net) { - ip6t_unregister_table(net->ipv6.ip6table_filter); + ip6t_unregister_table(net, net->ipv6.ip6table_filter); } static struct pernet_operations ip6table_filter_net_ops = { @@ -148,17 +84,16 @@ static int __init ip6table_filter_init(void) return -EINVAL; } - /* Entry 1 is the FORWARD hook */ - initial_table.entries[1].target.verdict = -forward - 1; - ret = register_pernet_subsys(&ip6table_filter_net_ops); if (ret < 0) return ret; /* Register hooks */ - ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); - if (ret < 0) + filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook); + if (IS_ERR(filter_ops)) { + ret = PTR_ERR(filter_ops); goto cleanup_table; + } return ret; @@ -169,7 +104,7 @@ static int __init ip6table_filter_init(void) static void __exit ip6table_filter_fini(void) { - nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); + xt_hook_unlink(&packet_filter, filter_ops); unregister_pernet_subsys(&ip6table_filter_net_ops); } diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index a929c19d30e3..7844e557c0ec 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -21,80 +21,17 @@ MODULE_DESCRIPTION("ip6tables mangle table"); (1 << NF_INET_LOCAL_OUT) | \ (1 << NF_INET_POST_ROUTING)) -static const struct -{ - struct ip6t_replace repl; - struct ip6t_standard entries[5]; - struct ip6t_error term; -} initial_table __net_initdata = { - .repl = { - .name = "mangle", - .valid_hooks = MANGLE_VALID_HOOKS, - .num_entries = 6, - .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error), - .hook_entry = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard), - [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2, - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, - [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4, - }, - .underflow = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard), - [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2, - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, - [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4, - }, - }, - .entries = { - IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */ - }, - .term = IP6T_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_mangler = { .name = "mangle", .valid_hooks = MANGLE_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, + .priority = NF_IP6_PRI_MANGLE, }; -/* The work comes in here from netfilter.c. 
*/ -static unsigned int -ip6t_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ip6t_do_table(skb, hook, in, out, - dev_net(in)->ipv6.ip6table_mangle); -} - -static unsigned int -ip6t_post_routing_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ip6t_do_table(skb, hook, in, out, - dev_net(out)->ipv6.ip6table_mangle); -} - static unsigned int -ip6t_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) { - unsigned int ret; struct in6_addr saddr, daddr; u_int8_t hop_limit; @@ -119,7 +56,7 @@ ip6t_local_out_hook(unsigned int hook, /* flowlabel and prio (includes version, which shouldn't change either */ flowlabel = *((u_int32_t *)ipv6_hdr(skb)); - ret = ip6t_do_table(skb, hook, in, out, + ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, dev_net(out)->ipv6.ip6table_mangle); if (ret != NF_DROP && ret != NF_STOLEN && @@ -132,49 +69,33 @@ ip6t_local_out_hook(unsigned int hook, return ret; } -static struct nf_hook_ops ip6t_ops[] __read_mostly = { - { - .hook = ip6t_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP6_PRI_MANGLE, - }, - { - .hook = ip6t_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP6_PRI_MANGLE, - }, - { - .hook = ip6t_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP6_PRI_MANGLE, - }, - { - .hook = ip6t_local_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP6_PRI_MANGLE, - }, - { - .hook = ip6t_post_routing_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP6_PRI_MANGLE, - }, -}; +/* The work comes in here from netfilter.c. 
*/ +static unsigned int +ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + if (hook == NF_INET_LOCAL_OUT) + return ip6t_mangle_out(skb, out); + if (hook == NF_INET_POST_ROUTING) + return ip6t_do_table(skb, hook, in, out, + dev_net(out)->ipv6.ip6table_mangle); + /* INPUT/FORWARD */ + return ip6t_do_table(skb, hook, in, out, + dev_net(in)->ipv6.ip6table_mangle); +} +static struct nf_hook_ops *mangle_ops __read_mostly; static int __net_init ip6table_mangle_net_init(struct net *net) { - /* Register table */ + struct ip6t_replace *repl; + + repl = ip6t_alloc_initial_table(&packet_mangler); + if (repl == NULL) + return -ENOMEM; net->ipv6.ip6table_mangle = - ip6t_register_table(net, &packet_mangler, &initial_table.repl); + ip6t_register_table(net, &packet_mangler, repl); + kfree(repl); if (IS_ERR(net->ipv6.ip6table_mangle)) return PTR_ERR(net->ipv6.ip6table_mangle); return 0; @@ -182,7 +103,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net) static void __net_exit ip6table_mangle_net_exit(struct net *net) { - ip6t_unregister_table(net->ipv6.ip6table_mangle); + ip6t_unregister_table(net, net->ipv6.ip6table_mangle); } static struct pernet_operations ip6table_mangle_net_ops = { @@ -199,9 +120,11 @@ static int __init ip6table_mangle_init(void) return ret; /* Register hooks */ - ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); - if (ret < 0) + mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook); + if (IS_ERR(mangle_ops)) { + ret = PTR_ERR(mangle_ops); goto cleanup_table; + } return ret; @@ -212,7 +135,7 @@ static int __init ip6table_mangle_init(void) static void __exit ip6table_mangle_fini(void) { - nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); + xt_hook_unlink(&packet_mangler, mangle_ops); unregister_pernet_subsys(&ip6table_mangle_net_ops); } diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index ed1a1180f3b3..aef31a29de9e 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c @@ -8,85 +8,37 @@ #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) -static const struct -{ - struct ip6t_replace repl; - struct ip6t_standard entries[2]; - struct ip6t_error term; -} initial_table __net_initdata = { - .repl = { - .name = "raw", - .valid_hooks = RAW_VALID_HOOKS, - .num_entries = 3, - .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error), - .hook_entry = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) - }, - .underflow = { - [NF_INET_PRE_ROUTING] = 0, - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) - }, - }, - .entries = { - IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IP6T_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table packet_raw = { .name = "raw", .valid_hooks = RAW_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, + .priority = NF_IP6_PRI_FIRST, }; /* The work comes in here from netfilter.c. 
*/ static unsigned int -ip6t_pre_routing_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +ip6table_raw_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return ip6t_do_table(skb, hook, in, out, - dev_net(in)->ipv6.ip6table_raw); -} + const struct net *net = dev_net((in != NULL) ? in : out); -static unsigned int -ip6t_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ip6t_do_table(skb, hook, in, out, - dev_net(out)->ipv6.ip6table_raw); + return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw); } -static struct nf_hook_ops ip6t_ops[] __read_mostly = { - { - .hook = ip6t_pre_routing_hook, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_PRE_ROUTING, - .priority = NF_IP6_PRI_FIRST, - .owner = THIS_MODULE, - }, - { - .hook = ip6t_local_out_hook, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP6_PRI_FIRST, - .owner = THIS_MODULE, - }, -}; +static struct nf_hook_ops *rawtable_ops __read_mostly; static int __net_init ip6table_raw_net_init(struct net *net) { - /* Register table */ + struct ip6t_replace *repl; + + repl = ip6t_alloc_initial_table(&packet_raw); + if (repl == NULL) + return -ENOMEM; net->ipv6.ip6table_raw = - ip6t_register_table(net, &packet_raw, &initial_table.repl); + ip6t_register_table(net, &packet_raw, repl); + kfree(repl); if (IS_ERR(net->ipv6.ip6table_raw)) return PTR_ERR(net->ipv6.ip6table_raw); return 0; @@ -94,7 +46,7 @@ static int __net_init ip6table_raw_net_init(struct net *net) static void __net_exit ip6table_raw_net_exit(struct net *net) { - ip6t_unregister_table(net->ipv6.ip6table_raw); + ip6t_unregister_table(net, net->ipv6.ip6table_raw); } static struct pernet_operations ip6table_raw_net_ops = { @@ -111,9 +63,11 @@ static int __init ip6table_raw_init(void) return ret; /* Register hooks */ - ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); - if (ret < 0) + rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook); + if (IS_ERR(rawtable_ops)) { + ret = PTR_ERR(rawtable_ops); goto cleanup_table; + } return ret; @@ -124,7 +78,7 @@ static int __init ip6table_raw_init(void) static void __exit ip6table_raw_fini(void) { - nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); + xt_hook_unlink(&packet_raw, rawtable_ops); unregister_pernet_subsys(&ip6table_raw_net_ops); } diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index 41b444c60934..0824d865aa9b 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c @@ -26,106 +26,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules"); (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) -static const struct -{ - struct ip6t_replace repl; - struct ip6t_standard entries[3]; - struct ip6t_error term; -} initial_table __net_initdata = { - .repl = { - .name = "security", - .valid_hooks = SECURITY_VALID_HOOKS, - .num_entries = 4, - .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), - .hook_entry = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ip6t_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2, - }, - .underflow = { - [NF_INET_LOCAL_IN] = 0, - [NF_INET_FORWARD] = sizeof(struct ip6t_standard), - [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2, - }, - 
}, - .entries = { - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */ - IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */ - }, - .term = IP6T_ERROR_INIT, /* ERROR */ -}; - static const struct xt_table security_table = { .name = "security", .valid_hooks = SECURITY_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, + .priority = NF_IP6_PRI_SECURITY, }; static unsigned int -ip6t_local_in_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - return ip6t_do_table(skb, hook, in, out, - dev_net(in)->ipv6.ip6table_security); -} - -static unsigned int -ip6t_forward_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +ip6table_security_hook(unsigned int hook, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - return ip6t_do_table(skb, hook, in, out, - dev_net(in)->ipv6.ip6table_security); -} + const struct net *net = dev_net((in != NULL) ? in : out); -static unsigned int -ip6t_local_out_hook(unsigned int hook, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* TBD: handle short packets via raw socket */ - return ip6t_do_table(skb, hook, in, out, - dev_net(out)->ipv6.ip6table_security); + return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security); } -static struct nf_hook_ops ip6t_ops[] __read_mostly = { - { - .hook = ip6t_local_in_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP6_PRI_SECURITY, - }, - { - .hook = ip6t_forward_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_FORWARD, - .priority = NF_IP6_PRI_SECURITY, - }, - { - .hook = ip6t_local_out_hook, - .owner = THIS_MODULE, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_OUT, - .priority = NF_IP6_PRI_SECURITY, - }, -}; +static struct nf_hook_ops *sectbl_ops __read_mostly; static int __net_init ip6table_security_net_init(struct net *net) { - net->ipv6.ip6table_security = - ip6t_register_table(net, &security_table, &initial_table.repl); + struct ip6t_replace *repl; + repl = ip6t_alloc_initial_table(&security_table); + if (repl == NULL) + return -ENOMEM; + net->ipv6.ip6table_security = + ip6t_register_table(net, &security_table, repl); + kfree(repl); if (IS_ERR(net->ipv6.ip6table_security)) return PTR_ERR(net->ipv6.ip6table_security); @@ -134,7 +65,7 @@ static int __net_init ip6table_security_net_init(struct net *net) static void __net_exit ip6table_security_net_exit(struct net *net) { - ip6t_unregister_table(net->ipv6.ip6table_security); + ip6t_unregister_table(net, net->ipv6.ip6table_security); } static struct pernet_operations ip6table_security_net_ops = { @@ -150,9 +81,11 @@ static int __init ip6table_security_init(void) if (ret < 0) return ret; - ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); - if (ret < 0) + sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook); + if (IS_ERR(sectbl_ops)) { + ret = PTR_ERR(sectbl_ops); goto cleanup_table; + } return ret; @@ -163,7 +96,7 @@ cleanup_table: static void __exit ip6table_security_fini(void) { - nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); + xt_hook_unlink(&security_table, sectbl_ops); unregister_pernet_subsys(&ip6table_security_net_ops); } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c 
b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 0956ebabbff2..996c3f41fecd 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -27,6 +27,7 @@ #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> #include <net/netfilter/nf_log.h> @@ -191,15 +192,20 @@ out: static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, struct sk_buff *skb) { + u16 zone = NF_CT_DEFAULT_ZONE; + + if (skb->nfct) + zone = nf_ct_zone((struct nf_conn *)skb->nfct); + #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) - return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; + return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone; #endif if (hooknum == NF_INET_PRE_ROUTING) - return IP6_DEFRAG_CONNTRACK_IN; + return IP6_DEFRAG_CONNTRACK_IN + zone; else - return IP6_DEFRAG_CONNTRACK_OUT; + return IP6_DEFRAG_CONNTRACK_OUT + zone; } @@ -212,7 +218,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum, struct sk_buff *reasm; /* Previously seen (loopback)? */ - if (skb->nfct) + if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) return NF_ACCEPT; reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index c7b8bd1d7984..9be81776415e 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -23,6 +23,7 @@ #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> #include <net/netfilter/nf_log.h> @@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, } static int -icmpv6_error_message(struct net *net, +icmpv6_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int icmp6off, enum ip_conntrack_info *ctinfo, @@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net, struct nf_conntrack_tuple intuple, origtuple; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_l4proto *inproto; + u16 zone = tmpl ? 
nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; NF_CT_ASSERT(skb->nfct == NULL); @@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net, *ctinfo = IP_CT_RELATED; - h = nf_conntrack_find_get(net, &intuple); + h = nf_conntrack_find_get(net, zone, &intuple); if (!h) { pr_debug("icmpv6_error: no match\n"); return -NF_ACCEPT; @@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net, } static int -icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, +icmpv6_error(struct net *net, struct nf_conn *tmpl, + struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { const struct icmp6hdr *icmp6h; @@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, if (icmp6h->icmp6_type >= 128) return NF_ACCEPT; - return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum); + return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); } #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 624a54832a7c..ad1fcda6898b 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -45,9 +45,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */ -#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */ -#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT struct nf_ct_frag6_skb_cb { @@ -670,8 +667,8 @@ int nf_ct_frag6_init(void) nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.secret_interval = 10 * 60 * HZ; nf_init_frags.timeout = IPV6_FRAG_TIMEOUT; - nf_init_frags.high_thresh = 256 * 1024; - nf_init_frags.low_thresh = 192 * 1024; + nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH; + nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH; inet_frags_init_net(&nf_init_frags); inet_frags_init(&nf_frags); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index fe27eb4264d6..b2847ed6a7d9 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -742,8 +742,8 @@ static inline void ip6_frags_sysctl_unregister(void) static int __net_init ipv6_frags_init_net(struct net *net) { - net->ipv6.frags.high_thresh = 256 * 1024; - net->ipv6.frags.low_thresh = 192 * 1024; + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; + net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; inet_frags_init_net(&net->ipv6.frags); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 634d14affc8d..18d77b5c351a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK If unsure, say 'N'. +config NF_CONNTRACK_ZONES + bool 'Connection tracking zones' + depends on NETFILTER_ADVANCED + depends on NETFILTER_XT_TARGET_CT + help + This option enables support for connection tracking zones. + Normally, each connection needs to have a unique system-wide + identity. Connection tracking zones allow multiple + connections to use the same identity, as long as they are + contained in different zones. + + If unsure, say `N'. + config NF_CONNTRACK_EVENTS bool "Connection tracking events" depends on NETFILTER_ADVANCED @@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK To compile it as a module, choose M here. If unsure, say N.
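(Background sketch, not part of the patch itself: the zone id that NF_CONNTRACK_ZONES introduces is carried as a conntrack extension and read back with nf_ct_zone(), which is where the zone value threaded through the lookup and hashing changes above comes from. Below is a minimal illustration of such an accessor, assuming the NF_CT_EXT_ZONE extension and struct nf_conntrack_zone registered elsewhere in this series plus the generic nf_ct_ext_find() helper; the exact header contents are not shown in this diff.

static inline u16 nf_ct_zone(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
	struct nf_conntrack_zone *nf_ct_zone;

	/* the id lives in an optional extension; no extension means zone 0 */
	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
	if (nf_ct_zone != NULL)
		return nf_ct_zone->id;
#endif
	return NF_CT_DEFAULT_ZONE;
}

With a matching userspace, packets are assigned to a zone from the raw table through the CT target added below, along the lines of "iptables -t raw -A PREROUTING -i eth1 -j CT --zone 1"; the --zone option is assumed here from the xt_CT target and requires an iptables release that already supports it.)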
+config NETFILTER_XT_TARGET_CT + tristate '"CT" target support' + depends on NF_CONNTRACK + depends on IP_NF_RAW || IP6_NF_RAW + depends on NETFILTER_ADVANCED + help + This option adds a `CT' target, which allows you to specify initial + connection tracking parameters such as the events to be delivered and + the helper to be used. + + To compile it as a module, choose M here. If unsure, say N. + config NETFILTER_XT_TARGET_DSCP tristate '"DSCP" and "TOS" target support' depends on IP_NF_MANGLE || IP6_NF_MANGLE diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 49f62ee4e9ff..f873644f02f6 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o +obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index f2d76238b9b5..817a8898203b 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -68,6 +68,10 @@ config IP_VS_TAB_BITS each hash entry uses 8 bytes, so you can estimate how much memory is needed for your box. + You can override this value by setting the conn_tab_bits module parameter + or by appending ip_vs.conn_tab_bits=? to the kernel command line + if IP VS was compiled built-in. + comment "IPVS transport protocol load balancing support" config IP_VS_PROTO_TCP diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 27c30cf933da..60bb41a8d8d4 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -40,6 +40,21 @@ #include <net/ip_vs.h> +#ifndef CONFIG_IP_VS_TAB_BITS +#define CONFIG_IP_VS_TAB_BITS 12 +#endif + +/* + * Connection hash size. Default is what was selected at compile time.
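 *
 * Worked example (illustration only, not part of this patch): loading the
 * module with "modprobe ip_vs conn_tab_bits=15", or booting a built-in
 * IPVS with ip_vs.conn_tab_bits=15 on the kernel command line, makes the
 * init code below compute ip_vs_conn_tab_size = 1 << 15 = 32768 buckets
 * and ip_vs_conn_tab_mask = 32767 (0x7fff). Leaving the parameter alone
 * keeps the compile-time CONFIG_IP_VS_TAB_BITS value (12 when unset,
 * i.e. 4096 buckets).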
+*/ +int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; +module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444); +MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size"); + +/* size and mask values */ +int ip_vs_conn_tab_size; +int ip_vs_conn_tab_mask; + /* * Connection hash table: for input and output packets lookups of IPVS */ @@ -125,11 +140,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto, if (af == AF_INET6) return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), (__force u32)port, proto, ip_vs_conn_rnd) - & IP_VS_CONN_TAB_MASK; + & ip_vs_conn_tab_mask; #endif return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, ip_vs_conn_rnd) - & IP_VS_CONN_TAB_MASK; + & ip_vs_conn_tab_mask; } @@ -760,7 +775,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) int idx; struct ip_vs_conn *cp; - for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { ct_read_lock_bh(idx); list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { if (pos-- == 0) { @@ -797,7 +812,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) idx = l - ip_vs_conn_tab; ct_read_unlock_bh(idx); - while (++idx < IP_VS_CONN_TAB_SIZE) { + while (++idx < ip_vs_conn_tab_size) { ct_read_lock_bh(idx); list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { seq->private = &ip_vs_conn_tab[idx]; @@ -976,8 +991,8 @@ void ip_vs_random_dropentry(void) /* * Randomly scan 1/32 of the whole table every second */ - for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) { - unsigned hash = net_random() & IP_VS_CONN_TAB_MASK; + for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { + unsigned hash = net_random() & ip_vs_conn_tab_mask; /* * Lock is actually needed in this loop. @@ -1029,7 +1044,7 @@ static void ip_vs_conn_flush(void) struct ip_vs_conn *cp; flush_again: - for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) { + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { /* * Lock is actually needed in this loop. 
*/ @@ -1060,10 +1075,15 @@ int __init ip_vs_conn_init(void) { int idx; + /* Compute size and mask */ + ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; + ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1; + /* * Allocate the connection hash table and initialize its list heads */ - ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head)); + ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * + sizeof(struct list_head)); if (!ip_vs_conn_tab) return -ENOMEM; @@ -1078,12 +1098,12 @@ int __init ip_vs_conn_init(void) pr_info("Connection hash table configured " "(size=%d, memory=%ldKbytes)\n", - IP_VS_CONN_TAB_SIZE, - (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); + ip_vs_conn_tab_size, + (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024); IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", sizeof(struct ip_vs_conn)); - for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c37ac2d7bec4..00d0b152db39 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1843,7 +1843,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) { seq_printf(seq, "IP Virtual Server version %d.%d.%d (size=%d)\n", - NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); + NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size); seq_puts(seq, "Prot LocalAddress:Port Scheduler Flags\n"); seq_puts(seq, @@ -2386,7 +2386,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) char buf[64]; sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", - NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); + NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size); if (copy_to_user(user, buf, strlen(buf)+1) != 0) { ret = -EFAULT; goto out; @@ -2399,7 +2399,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct ip_vs_getinfo info; info.version = IP_VS_VERSION_CODE; - info.size = IP_VS_CONN_TAB_SIZE; + info.size = ip_vs_conn_tab_size; info.num_services = ip_vs_num_services; if (copy_to_user(user, &info, sizeof(info)) != 0) ret = -EFAULT; @@ -3243,7 +3243,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) case IPVS_CMD_GET_INFO: NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, - IP_VS_CONN_TAB_SIZE); + ip_vs_conn_tab_size); break; } diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 33e2c799cba7..73f38ea98f25 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -208,7 +208,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, */ from.ip = n_cp->vaddr.ip; port = n_cp->vport; - sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), + sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip), (ntohs(port)>>8)&255, ntohs(port)&255); buf_len = strlen(buf); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 4d79e3c1616c..0c9bbe93cc16 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -42,6 +42,7 @@ #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_ecache.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_core.h> @@ -68,7 +69,7 @@ static int 
nf_conntrack_hash_rnd_initted; static unsigned int nf_conntrack_hash_rnd; static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, - unsigned int size, unsigned int rnd) + u16 zone, unsigned int size, unsigned int rnd) { unsigned int n; u_int32_t h; @@ -79,16 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, */ n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); h = jhash2((u32 *)tuple, n, - rnd ^ (((__force __u16)tuple->dst.u.all << 16) | - tuple->dst.protonum)); + zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) | + tuple->dst.protonum)); return ((u64)h * size) >> 32; } -static inline u_int32_t hash_conntrack(const struct net *net, +static inline u_int32_t hash_conntrack(const struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { - return __hash_conntrack(tuple, net->ct.htable_size, + return __hash_conntrack(tuple, zone, net->ct.htable_size, nf_conntrack_hash_rnd); } @@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack) * - Caller must lock nf_conntrack_lock before calling this function */ struct nf_conntrack_tuple_hash * -__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) +__nf_conntrack_find(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(net, tuple); + unsigned int hash = hash_conntrack(net, zone, tuple); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. @@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) local_bh_disable(); begin: hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { - if (nf_ct_tuple_equal(tuple, &h->tuple)) { + if (nf_ct_tuple_equal(tuple, &h->tuple) && + nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) { NF_CT_STAT_INC(net, found); local_bh_enable(); return h; @@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find); /* Find a connection corresponding to a tuple. 
*/ struct nf_conntrack_tuple_hash * -nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) +nf_conntrack_find_get(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; rcu_read_lock(); begin: - h = __nf_conntrack_find(net, tuple); + h = __nf_conntrack_find(net, zone, tuple); if (h) { ct = nf_ct_tuplehash_to_ctrack(h); if (unlikely(nf_ct_is_dying(ct) || !atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; else { - if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { + if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || + nf_ct_zone(ct) != zone)) { nf_ct_put(ct); goto begin; } @@ -368,9 +373,11 @@ void nf_conntrack_hash_insert(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); unsigned int hash, repl_hash; + u16 zone; - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); + zone = nf_ct_zone(ct); + hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); __nf_conntrack_hash_insert(ct, hash, repl_hash); } @@ -387,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; + u16 zone; ct = nf_ct_get(skb, &ctinfo); net = nf_ct_net(ct); @@ -398,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb) if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); + zone = nf_ct_zone(ct); + hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); + repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); /* We're not in hash table, and we refuse to set up related connections for unconfirmed conns. But packet copies and @@ -418,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb) not in the hash. If there is, we lost race. */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &h->tuple)) + &h->tuple) && + zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, - &h->tuple)) + &h->tuple) && + zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; /* Remove from unconfirmed list */ @@ -469,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct net *net = nf_ct_net(ignored_conntrack); struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int hash = hash_conntrack(net, tuple); + struct nf_conn *ct; + u16 zone = nf_ct_zone(ignored_conntrack); + unsigned int hash = hash_conntrack(net, zone, tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. 
*/ rcu_read_lock_bh(); hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { - if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && - nf_ct_tuple_equal(tuple, &h->tuple)) { + ct = nf_ct_tuplehash_to_ctrack(h); + if (ct != ignored_conntrack && + nf_ct_tuple_equal(tuple, &h->tuple) && + nf_ct_zone(ct) == zone) { NF_CT_STAT_INC(net, found); rcu_read_unlock_bh(); return 1; @@ -540,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) return dropped; } -struct nf_conn *nf_conntrack_alloc(struct net *net, +struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp) @@ -558,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { - unsigned int hash = hash_conntrack(net, orig); + unsigned int hash = hash_conntrack(net, zone, orig); if (!early_drop(net, hash)) { atomic_dec(&net->ct.count); if (net_ratelimit()) @@ -595,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, #ifdef CONFIG_NET_NS ct->ct_net = net; #endif - +#ifdef CONFIG_NF_CONNTRACK_ZONES + if (zone) { + struct nf_conntrack_zone *nf_ct_zone; + + nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC); + if (!nf_ct_zone) + goto out_free; + nf_ct_zone->id = zone; + } +#endif /* * changes to lookup keys must be done before setting refcnt to 1 */ smp_wmb(); atomic_set(&ct->ct_general.use, 1); return ct; + +#ifdef CONFIG_NF_CONNTRACK_ZONES +out_free: + kmem_cache_free(net->ct.nf_conntrack_cachep, ct); + return ERR_PTR(-ENOMEM); +#endif } EXPORT_SYMBOL_GPL(nf_conntrack_alloc); @@ -619,7 +649,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free); /* Allocate a new conntrack: we return -ENOMEM if classification failed due to stress. Otherwise it really is unclassifiable. */ static struct nf_conntrack_tuple_hash * -init_conntrack(struct net *net, +init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, @@ -629,14 +659,16 @@ init_conntrack(struct net *net, struct nf_conn *ct; struct nf_conn_help *help; struct nf_conntrack_tuple repl_tuple; + struct nf_conntrack_ecache *ecache; struct nf_conntrack_expect *exp; + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { pr_debug("Can't invert tuple.\n"); return NULL; } - ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); + ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC); if (IS_ERR(ct)) { pr_debug("Can't allocate conntrack.\n"); return (struct nf_conntrack_tuple_hash *)ct; @@ -649,10 +681,14 @@ init_conntrack(struct net *net, } nf_ct_acct_ext_add(ct, GFP_ATOMIC); - nf_ct_ecache_ext_add(ct, GFP_ATOMIC); + + ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; + nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, + ecache ? 
ecache->expmask : 0, + GFP_ATOMIC); spin_lock_bh(&nf_conntrack_lock); - exp = nf_ct_find_expectation(net, tuple); + exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", ct, exp); @@ -674,7 +710,7 @@ init_conntrack(struct net *net, nf_conntrack_get(&ct->master->ct_general); NF_CT_STAT_INC(net, expect_new); } else { - __nf_ct_try_assign_helper(ct, GFP_ATOMIC); + __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); NF_CT_STAT_INC(net, new); } @@ -695,7 +731,7 @@ init_conntrack(struct net *net, /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ static inline struct nf_conn * -resolve_normal_ct(struct net *net, +resolve_normal_ct(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u_int16_t l3num, @@ -708,6 +744,7 @@ resolve_normal_ct(struct net *net, struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; + u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, &tuple, l3proto, @@ -717,9 +754,10 @@ resolve_normal_ct(struct net *net, } /* look for tuple match */ - h = nf_conntrack_find_get(net, &tuple); + h = nf_conntrack_find_get(net, zone, &tuple); if (!h) { - h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff); + h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, + skb, dataoff); if (!h) return NULL; if (IS_ERR(h)) @@ -756,7 +794,7 @@ unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { - struct nf_conn *ct; + struct nf_conn *ct, *tmpl = NULL; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; @@ -765,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, int set_reply = 0; int ret; - /* Previously seen (loopback or untracked)? Ignore. */ if (skb->nfct) { - NF_CT_STAT_INC_ATOMIC(net, ignore); - return NF_ACCEPT; + /* Previously seen (loopback or untracked)? Ignore. */ + tmpl = (struct nf_conn *)skb->nfct; + if (!nf_ct_is_template(tmpl)) { + NF_CT_STAT_INC_ATOMIC(net, ignore); + return NF_ACCEPT; + } + skb->nfct = NULL; } /* rcu_read_lock()ed by nf_hook_slow */ @@ -779,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, pr_debug("not prepared to track yet or error occured\n"); NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); - return -ret; + ret = -ret; + goto out; } l4proto = __nf_ct_l4proto_find(pf, protonum); @@ -788,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, * inverse of the return code tells to the netfilter * core what to do with the packet. */ if (l4proto->error != NULL) { - ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum); + ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, + pf, hooknum); if (ret <= 0) { NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); - return -ret; + ret = -ret; + goto out; } } - ct = resolve_normal_ct(net, skb, dataoff, pf, protonum, + ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); if (!ct) { /* Not valid part of a connection */ NF_CT_STAT_INC_ATOMIC(net, invalid); - return NF_ACCEPT; + ret = NF_ACCEPT; + goto out; } if (IS_ERR(ct)) { /* Too stressed to deal. 
*/ NF_CT_STAT_INC_ATOMIC(net, drop); - return NF_DROP; + ret = NF_DROP; + goto out; } NF_CT_ASSERT(skb->nfct); @@ -822,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(net, drop); - return -ret; + ret = -ret; + goto out; } if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_REPLY, ct); +out: + if (tmpl) + nf_ct_put(tmpl); return ret; } @@ -865,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct, return; rcu_read_lock(); - __nf_ct_try_assign_helper(ct, GFP_ATOMIC); + __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); @@ -939,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct, } EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); +#ifdef CONFIG_NF_CONNTRACK_ZONES +static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { + .len = sizeof(struct nf_conntrack_zone), + .align = __alignof__(struct nf_conntrack_zone), + .id = NF_CT_EXT_ZONE, +}; +#endif + #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) #include <linux/netfilter/nfnetlink.h> @@ -1120,6 +1179,9 @@ static void nf_conntrack_cleanup_init_net(void) nf_conntrack_helper_fini(); nf_conntrack_proto_fini(); +#ifdef CONFIG_NF_CONNTRACK_ZONES + nf_ct_extend_unregister(&nf_ct_zone_extend); +#endif } static void nf_conntrack_cleanup_net(struct net *net) @@ -1195,6 +1257,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) unsigned int hashsize, old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; + struct nf_conn *ct; if (current->nsproxy->net_ns != &init_net) return -EOPNOTSUPP; @@ -1221,8 +1284,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); + ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); - bucket = __hash_conntrack(&h->tuple, hashsize, + bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct), + hashsize, nf_conntrack_hash_rnd); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } @@ -1280,6 +1345,11 @@ static int nf_conntrack_init_init_net(void) if (ret < 0) goto err_helper; +#ifdef CONFIG_NF_CONNTRACK_ZONES + ret = nf_ct_extend_register(&nf_ct_zone_extend); + if (ret < 0) + goto err_extend; +#endif /* Set up fake conntrack: to never be deleted, not in any hashes */ #ifdef CONFIG_NET_NS nf_conntrack_untracked.ct_net = &init_net; @@ -1290,6 +1360,10 @@ static int nf_conntrack_init_init_net(void) return 0; +#ifdef CONFIG_NF_CONNTRACK_ZONES +err_extend: + nf_conntrack_helper_fini(); +#endif err_helper: nf_conntrack_proto_fini(); err_proto: diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 2f25ff610982..acb29ccaa41f 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -27,6 +27,7 @@ #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_tuple.h> +#include <net/netfilter/nf_conntrack_zones.h> unsigned int nf_ct_expect_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); @@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple } struct nf_conntrack_expect * -__nf_ct_expect_find(struct net *net, const 
struct nf_conntrack_tuple *tuple) +__nf_ct_expect_find(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; struct hlist_node *n; @@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple) h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) + if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && + nf_ct_zone(i->master) == zone) return i; } return NULL; @@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find); /* Just find a expectation corresponding to a tuple. */ struct nf_conntrack_expect * -nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) +nf_ct_expect_find_get(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; rcu_read_lock(); - i = __nf_ct_expect_find(net, tuple); + i = __nf_ct_expect_find(net, zone, tuple); if (i && !atomic_inc_not_zero(&i->use)) i = NULL; rcu_read_unlock(); @@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get); /* If an expectation for this connection is found, it gets delete from * global list then returned. */ struct nf_conntrack_expect * -nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) +nf_ct_find_expectation(struct net *net, u16 zone, + const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i, *exp = NULL; struct hlist_node *n; @@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { if (!(i->flags & NF_CT_EXPECT_INACTIVE) && - nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { + nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && + nf_ct_zone(i->master) == zone) { exp = i; break; } @@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a, { return a->master == b->master && a->class == b->class && nf_ct_tuple_equal(&a->tuple, &b->tuple) && - nf_ct_tuple_mask_equal(&a->mask, &b->mask); + nf_ct_tuple_mask_equal(&a->mask, &b->mask) && + nf_ct_zone(a->master) == nf_ct_zone(b->master); } /* Generally a bad idea to call this: could have matched already. */ @@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me) new->master = me; atomic_set(&new->use, 1); - INIT_RCU_HEAD(&new->rcu); return new; } EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); @@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v) static int exp_seq_show(struct seq_file *s, void *v) { struct nf_conntrack_expect *expect; + struct nf_conntrack_helper *helper; struct hlist_node *n = v; char *delim = ""; @@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v) if (expect->flags & NF_CT_EXPECT_INACTIVE) seq_printf(s, "%sINACTIVE", delim); + helper = rcu_dereference(nfct_help(expect->master)->helper); + if (helper) { + seq_printf(s, "%s%s", expect->flags ? 
" " : "", helper->name); + if (helper->expect_policy[expect->class].name) + seq_printf(s, "/%s", + helper->expect_policy[expect->class].name); + } + return seq_putc(s, '\n'); } diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index fef95be334bd..fdc8fb4ae10f 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) if (!*ext) return NULL; - INIT_RCU_HEAD(&(*ext)->rcu); (*ext)->offset[id] = off; (*ext)->len = len; diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 66369490230e..a1c8dd917e12 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -29,6 +29,7 @@ #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_helper.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_h323.h> /* Parameters */ @@ -1216,7 +1217,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, tuple.dst.u.tcp.port = port; tuple.dst.protonum = IPPROTO_TCP; - exp = __nf_ct_expect_find(net, &tuple); + exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); if (exp && exp->master == ct) return exp; return NULL; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 4b1a56bd074c..4509fa6726f8 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -65,7 +65,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) } struct nf_conntrack_helper * -__nf_conntrack_helper_find_byname(const char *name) +__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; struct hlist_node *n; @@ -73,13 +73,34 @@ __nf_conntrack_helper_find_byname(const char *name) for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { - if (!strcmp(h->name, name)) + if (!strcmp(h->name, name) && + h->tuple.src.l3num == l3num && + h->tuple.dst.protonum == protonum) return h; } } return NULL; } -EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); +EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find); + +struct nf_conntrack_helper * +nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) +{ + struct nf_conntrack_helper *h; + + h = __nf_conntrack_helper_find(name, l3num, protonum); +#ifdef CONFIG_MODULES + if (h == NULL) { + if (request_module("nfct-helper-%s", name) == 0) + h = __nf_conntrack_helper_find(name, l3num, protonum); + } +#endif + if (h != NULL && !try_module_get(h->me)) + h = NULL; + + return h; +} +EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) { @@ -94,13 +115,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) } EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); -int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags) +int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, + gfp_t flags) { + struct nf_conntrack_helper *helper = NULL; + struct nf_conn_help *help; int ret = 0; - struct nf_conntrack_helper *helper; - struct nf_conn_help *help = nfct_help(ct); - helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); + if (tmpl != NULL) { + help = nfct_help(tmpl); + if (help != NULL) + helper = help->helper; + } + + help = nfct_help(ct); + if 
(helper == NULL) + helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); if (helper == NULL) { if (help) rcu_assign_pointer(help->helper, NULL); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 0ffe689dfe97..8b05f364b2f2 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -30,6 +30,7 @@ #include <linux/netfilter.h> #include <net/netlink.h> +#include <net/sock.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> @@ -38,6 +39,7 @@ #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_acct.h> +#include <net/netfilter/nf_conntrack_zones.h> #ifdef CONFIG_NF_NAT_NEEDED #include <net/netfilter/nf_nat_core.h> #include <net/netfilter/nf_nat_protocol.h> @@ -378,6 +380,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, goto nla_put_failure; nla_nest_end(skb, nest_parms); + if (nf_ct_zone(ct)) + NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (ctnetlink_dump_status(skb, ct) < 0 || ctnetlink_dump_timeout(skb, ct) < 0 || ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || @@ -456,6 +461,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) static int ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) { + struct net *net; struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; struct nlattr *nest_parms; @@ -482,7 +488,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) } else return 0; - if (!item->report && !nfnetlink_has_listeners(group)) + net = nf_ct_net(ct); + if (!item->report && !nfnetlink_has_listeners(net, group)) return 0; skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); @@ -514,6 +521,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) goto nla_put_failure; nla_nest_end(skb, nest_parms); + if (nf_ct_zone(ct)) + NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); + if (ctnetlink_dump_id(skb, ct) < 0) goto nla_put_failure; @@ -559,7 +569,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) rcu_read_unlock(); nlmsg_end(skb, nlh); - err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC); + err = nfnetlink_send(skb, net, item->pid, group, item->report, + GFP_ATOMIC); if (err == -ENOBUFS || err == -EAGAIN) return -ENOBUFS; @@ -571,7 +582,7 @@ nla_put_failure: nlmsg_failure: kfree_skb(skb); errout: - nfnetlink_set_err(0, group, -ENOBUFS); + nfnetlink_set_err(net, 0, group, -ENOBUFS); return 0; } #endif /* CONFIG_NF_CONNTRACK_EVENTS */ @@ -586,6 +597,7 @@ static int ctnetlink_done(struct netlink_callback *cb) static int ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { + struct net *net = sock_net(skb->sk); struct nf_conn *ct, *last; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; @@ -594,9 +606,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conn *)cb->args[1]; - for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { + for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { restart: - hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; @@ -703,6 +715,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr, return ret; } +static const struct nla_policy 
tuple_nla_policy[CTA_TUPLE_MAX+1] = { + [CTA_TUPLE_IP] = { .type = NLA_NESTED }, + [CTA_TUPLE_PROTO] = { .type = NLA_NESTED }, +}; + static int ctnetlink_parse_tuple(const struct nlattr * const cda[], struct nf_conntrack_tuple *tuple, @@ -713,7 +730,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], memset(tuple, 0, sizeof(*tuple)); - nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL); + nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy); if (!tb[CTA_TUPLE_IP]) return -EINVAL; @@ -740,12 +757,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], return 0; } +static int +ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone) +{ + if (attr) +#ifdef CONFIG_NF_CONNTRACK_ZONES + *zone = ntohs(nla_get_be16(attr)); +#else + return -EOPNOTSUPP; +#endif + else + *zone = 0; + + return 0; +} + +static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = { + [CTA_HELP_NAME] = { .type = NLA_NUL_STRING }, +}; + static inline int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) { struct nlattr *tb[CTA_HELP_MAX+1]; - nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL); + nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy); if (!tb[CTA_HELP_NAME]) return -EINVAL; @@ -756,11 +792,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) } static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { + [CTA_TUPLE_ORIG] = { .type = NLA_NESTED }, + [CTA_TUPLE_REPLY] = { .type = NLA_NESTED }, [CTA_STATUS] = { .type = NLA_U32 }, + [CTA_PROTOINFO] = { .type = NLA_NESTED }, + [CTA_HELP] = { .type = NLA_NESTED }, + [CTA_NAT_SRC] = { .type = NLA_NESTED }, [CTA_TIMEOUT] = { .type = NLA_U32 }, [CTA_MARK] = { .type = NLA_U32 }, - [CTA_USE] = { .type = NLA_U32 }, [CTA_ID] = { .type = NLA_U32 }, + [CTA_NAT_DST] = { .type = NLA_NESTED }, + [CTA_TUPLE_MASTER] = { .type = NLA_NESTED }, + [CTA_ZONE] = { .type = NLA_U16 }, }; static int @@ -768,12 +811,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; struct nf_conn *ct; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; - int err = 0; + u16 zone; + int err; + + err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); + if (err < 0) + return err; if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); @@ -781,7 +830,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); else { /* Flush the whole table */ - nf_conntrack_flush_report(&init_net, + nf_conntrack_flush_report(net, NETLINK_CB(skb).pid, nlmsg_report(nlh)); return 0; @@ -790,7 +839,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, if (err < 0) return err; - h = nf_conntrack_find_get(&init_net, &tuple); + h = nf_conntrack_find_get(net, zone, &tuple); if (!h) return -ENOENT; @@ -828,18 +877,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; struct nf_conn *ct; struct sk_buff *skb2 = NULL; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; - int err = 0; + u16 zone; + int err; if (nlh->nlmsg_flags & NLM_F_DUMP) return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, ctnetlink_done); + err = 
ctnetlink_parse_zone(cda[CTA_ZONE], &zone); + if (err < 0) + return err; + if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); else if (cda[CTA_TUPLE_REPLY]) @@ -850,7 +905,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, if (err < 0) return err; - h = nf_conntrack_find_get(&init_net, &tuple); + h = nf_conntrack_find_get(net, zone, &tuple); if (!h) return -ENOENT; @@ -994,7 +1049,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) return 0; } - helper = __nf_conntrack_helper_find_byname(helpname); + helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), + nf_ct_protonum(ct)); if (helper == NULL) { #ifdef CONFIG_MODULES spin_unlock_bh(&nf_conntrack_lock); @@ -1005,7 +1061,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) } spin_lock_bh(&nf_conntrack_lock); - helper = __nf_conntrack_helper_find_byname(helpname); + helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), + nf_ct_protonum(ct)); if (helper) return -EAGAIN; #endif @@ -1044,6 +1101,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) return 0; } +static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = { + [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED }, + [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED }, + [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED }, +}; + static inline int ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) { @@ -1052,7 +1115,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[] struct nf_conntrack_l4proto *l4proto; int err = 0; - nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); + nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy); rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); @@ -1064,12 +1127,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[] } #ifdef CONFIG_NF_NAT_NEEDED +static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = { + [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 }, + [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 }, + [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 }, +}; + static inline int change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) { struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; - nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL); + nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy); if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) return -EINVAL; @@ -1175,7 +1244,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct, } static struct nf_conn * -ctnetlink_create_conntrack(const struct nlattr * const cda[], +ctnetlink_create_conntrack(struct net *net, u16 zone, + const struct nlattr * const cda[], struct nf_conntrack_tuple *otuple, struct nf_conntrack_tuple *rtuple, u8 u3) @@ -1184,7 +1254,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[], int err = -EINVAL; struct nf_conntrack_helper *helper; - ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); + ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); if (IS_ERR(ct)) return ERR_PTR(-ENOMEM); @@ -1203,7 +1273,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[], if (err < 0) goto err2; - helper = __nf_conntrack_helper_find_byname(helpname); + helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), + nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); #ifdef CONFIG_MODULES @@ -1213,7 +1284,9 @@ 
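The ctnetlink hunks above replace the NULL policy argument of nla_parse_nested() with explicit nla_policy tables (tuple_nla_policy, help_nla_policy, protoinfo_policy, nat_seq_policy) and declare CTA_ZONE as NLA_U16. A minimal editorial sketch of what a policy buys, written as if inside nf_conntrack_netlink.c; the wrapper name is hypothetical and the return value is checked only for illustration (the in-tree callers ignore it):

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

/* Editorial sketch, not part of the patch: with a policy in place,
 * nla_parse_nested() type- and length-checks every nested attribute
 * (e.g. CTA_HELP_NAME must be a NUL-terminated string) before the
 * ctnetlink handlers touch the payload. */
static int example_parse_help_name(const struct nlattr *attr,
				   const char **helper_name)
{
	struct nlattr *tb[CTA_HELP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
	if (err < 0)
		return err;
	if (!tb[CTA_HELP_NAME])
		return -EINVAL;
	*helper_name = nla_data(tb[CTA_HELP_NAME]);
	return 0;
}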
ctnetlink_create_conntrack(const struct nlattr * const cda[], } rcu_read_lock(); - helper = __nf_conntrack_helper_find_byname(helpname); + helper = __nf_conntrack_helper_find(helpname, + nf_ct_l3num(ct), + nf_ct_protonum(ct)); if (helper) { err = -EAGAIN; goto err2; @@ -1236,7 +1309,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[], } } else { /* try an implicit helper assignation */ - err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); + err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); if (err < 0) goto err2; } @@ -1268,7 +1341,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[], } nf_ct_acct_ext_add(ct, GFP_ATOMIC); - nf_ct_ecache_ext_add(ct, GFP_ATOMIC); + nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); #if defined(CONFIG_NF_CONNTRACK_MARK) if (cda[CTA_MARK]) @@ -1285,7 +1358,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[], if (err < 0) goto err2; - master_h = nf_conntrack_find_get(&init_net, &master); + master_h = nf_conntrack_find_get(net, zone, &master); if (master_h == NULL) { err = -ENOENT; goto err2; @@ -1313,11 +1386,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_tuple otuple, rtuple; struct nf_conntrack_tuple_hash *h = NULL; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; - int err = 0; + u16 zone; + int err; + + err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); + if (err < 0) + return err; if (cda[CTA_TUPLE_ORIG]) { err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3); @@ -1333,9 +1412,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, spin_lock_bh(&nf_conntrack_lock); if (cda[CTA_TUPLE_ORIG]) - h = __nf_conntrack_find(&init_net, &otuple); + h = __nf_conntrack_find(net, zone, &otuple); else if (cda[CTA_TUPLE_REPLY]) - h = __nf_conntrack_find(&init_net, &rtuple); + h = __nf_conntrack_find(net, zone, &rtuple); if (h == NULL) { err = -ENOENT; @@ -1343,7 +1422,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, struct nf_conn *ct; enum ip_conntrack_events events; - ct = ctnetlink_create_conntrack(cda, &otuple, + ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, &rtuple, u3); if (IS_ERR(ct)) { err = PTR_ERR(ct); @@ -1357,7 +1436,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, else events = IPCT_NEW; - nf_conntrack_eventmask_report((1 << IPCT_STATUS) | + nf_conntrack_eventmask_report((1 << IPCT_REPLY) | + (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | (1 << IPCT_PROTOINFO) | (1 << IPCT_NATSEQADJ) | @@ -1382,7 +1462,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, if (err == 0) { nf_conntrack_get(&ct->ct_general); spin_unlock_bh(&nf_conntrack_lock); - nf_conntrack_eventmask_report((1 << IPCT_STATUS) | + nf_conntrack_eventmask_report((1 << IPCT_REPLY) | + (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | (1 << IPCT_PROTOINFO) | (1 << IPCT_NATSEQADJ) | @@ -1469,6 +1550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, const struct nf_conntrack_expect *exp) { struct nf_conn *master = exp->master; + struct nf_conntrack_helper *helper; long timeout = (exp->timeout.expires - jiffies) / HZ; if (timeout < 0) @@ -1485,6 +1567,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); + helper = rcu_dereference(nfct_help(master)->helper); + if (helper) + NLA_PUT_STRING(skb, 
CTA_EXPECT_HELP_NAME, helper->name); return 0; @@ -1526,9 +1611,10 @@ nla_put_failure: static int ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) { + struct nf_conntrack_expect *exp = item->exp; + struct net *net = nf_ct_exp_net(exp); struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - struct nf_conntrack_expect *exp = item->exp; struct sk_buff *skb; unsigned int type; int flags = 0; @@ -1540,7 +1626,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) return 0; if (!item->report && - !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) + !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW)) return 0; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -1563,7 +1649,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) rcu_read_unlock(); nlmsg_end(skb, nlh); - nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, + nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, item->report, GFP_ATOMIC); return 0; @@ -1573,7 +1659,7 @@ nla_put_failure: nlmsg_failure: kfree_skb(skb); errout: - nfnetlink_set_err(0, 0, -ENOBUFS); + nfnetlink_set_err(net, 0, 0, -ENOBUFS); return 0; } #endif @@ -1587,7 +1673,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb) static int ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { - struct net *net = &init_net; + struct net *net = sock_net(skb->sk); struct nf_conntrack_expect *exp, *last; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); struct hlist_node *n; @@ -1631,8 +1717,12 @@ out: } static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { + [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, + [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, + [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, [CTA_EXPECT_ID] = { .type = NLA_U32 }, + [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING }, }; static int @@ -1640,12 +1730,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; struct sk_buff *skb2; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; - int err = 0; + u16 zone; + int err; if (nlh->nlmsg_flags & NLM_F_DUMP) { return netlink_dump_start(ctnl, skb, nlh, @@ -1653,6 +1745,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, ctnetlink_exp_done); } + err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); + if (err < 0) + return err; + if (cda[CTA_EXPECT_MASTER]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); else @@ -1661,7 +1757,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, if (err < 0) return err; - exp = nf_ct_expect_find_get(&init_net, &tuple); + exp = nf_ct_expect_find_get(net, zone, &tuple); if (!exp) return -ENOENT; @@ -1701,23 +1797,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_expect *exp; struct nf_conntrack_tuple tuple; - struct nf_conntrack_helper *h; struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct hlist_node *n, *next; u_int8_t u3 = nfmsg->nfgen_family; unsigned int i; + u16 zone; int err; if (cda[CTA_EXPECT_TUPLE]) { /* delete a single expect by tuple */ + err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); + if (err < 0) + return err; + err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 
u3); if (err < 0) return err; /* bump usage count to 2 */ - exp = nf_ct_expect_find_get(&init_net, &tuple); + exp = nf_ct_expect_find_get(net, zone, &tuple); if (!exp) return -ENOENT; @@ -1740,18 +1841,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, /* delete all expectations for this helper */ spin_lock_bh(&nf_conntrack_lock); - h = __nf_conntrack_helper_find_byname(name); - if (!h) { - spin_unlock_bh(&nf_conntrack_lock); - return -EOPNOTSUPP; - } for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, n, next, - &init_net.ct.expect_hash[i], + &net->ct.expect_hash[i], hnode) { m_help = nfct_help(exp->master); - if (m_help->helper == h - && del_timer(&exp->timeout)) { + if (!strcmp(m_help->helper->name, name) && + del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } @@ -1763,7 +1859,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, n, next, - &init_net.ct.expect_hash[i], + &net->ct.expect_hash[i], hnode) { if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); @@ -1784,7 +1880,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x, } static int -ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, +ctnetlink_create_expect(struct net *net, u16 zone, + const struct nlattr * const cda[], + u_int8_t u3, u32 pid, int report) { struct nf_conntrack_tuple tuple, mask, master_tuple; @@ -1806,7 +1904,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, return err; /* Look for master conntrack of this expectation */ - h = nf_conntrack_find_get(&init_net, &master_tuple); + h = nf_conntrack_find_get(net, zone, &master_tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); @@ -1846,29 +1944,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]) { + struct net *net = sock_net(ctnl); struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; - int err = 0; + u16 zone; + int err; if (!cda[CTA_EXPECT_TUPLE] || !cda[CTA_EXPECT_MASK] || !cda[CTA_EXPECT_MASTER]) return -EINVAL; + err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); + if (err < 0) + return err; + err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); if (err < 0) return err; spin_lock_bh(&nf_conntrack_lock); - exp = __nf_ct_expect_find(&init_net, &tuple); + exp = __nf_ct_expect_find(net, zone, &tuple); if (!exp) { spin_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) { - err = ctnetlink_create_expect(cda, + err = ctnetlink_create_expect(net, zone, cda, u3, NETLINK_CB(skb).pid, nlmsg_report(nlh)); diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 3807ac7faf4c..088944824e13 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -28,6 +28,7 @@ #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_helper.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> @@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct, pr_debug("trying to unexpect other dir: "); nf_ct_dump_tuple(&inv_t); - exp_other = nf_ct_expect_find_get(net, &inv_t); + exp_other = 
nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); if (exp_other) { /* delete other expectation. */ pr_debug("found\n"); @@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct, rcu_read_unlock(); } -static int destroy_sibling_or_exp(struct net *net, +static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, const struct nf_conntrack_tuple *t) { const struct nf_conntrack_tuple_hash *h; struct nf_conntrack_expect *exp; struct nf_conn *sibling; + u16 zone = nf_ct_zone(ct); pr_debug("trying to timeout ct or exp for tuple "); nf_ct_dump_tuple(t); - h = nf_conntrack_find_get(net, t); + h = nf_conntrack_find_get(net, zone, t); if (h) { sibling = nf_ct_tuplehash_to_ctrack(h); pr_debug("setting timeout of conntrack %p to 0\n", sibling); @@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net, nf_ct_put(sibling); return 1; } else { - exp = nf_ct_expect_find_get(net, t); + exp = nf_ct_expect_find_get(net, zone, t); if (exp) { pr_debug("unexpect_related of expect %p\n", exp); nf_ct_unexpect_related(exp); @@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct) t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; - if (!destroy_sibling_or_exp(net, &t)) + if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout original pns->pac ct/exp\n"); /* try reply (pac->pns) tuple */ @@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct) t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; - if (!destroy_sibling_or_exp(net, &t)) + if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout reply pac->pns ct/exp\n"); } diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index dd375500dccc..9a2815549375 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -561,8 +561,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, return NF_ACCEPT; } -static int dccp_error(struct net *net, struct sk_buff *skb, - unsigned int dataoff, enum ip_conntrack_info *ctinfo, +static int dccp_error(struct net *net, struct nf_conn *tmpl, + struct sk_buff *skb, unsigned int dataoff, + enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { struct dccp_hdr _dh, *dh; diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index c99cfba64ddc..d899b1a69940 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -241,7 +241,7 @@ static int gre_packet(struct nf_conn *ct, ct->proto.gre.stream_timeout); /* Also, more likely to be important, and not a probe. 
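The PPTP hunks above show the pattern used throughout this series: every conntrack and expectation lookup now takes a zone argument, and helpers derive it from the conntrack they are already holding via nf_ct_zone(). A hedged sketch of a zone-aware lookup; the function name and simplified error handling are illustrative only:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>

/* Editorial sketch, not part of the patch. */
static bool example_tuple_is_tracked(struct net *net, struct nf_conn *ct,
				     const struct nf_conntrack_tuple *t)
{
	/* nf_ct_zone() returns 0 unless CONFIG_NF_CONNTRACK_ZONES is set
	 * and the conntrack carries a zone extension. */
	u16 zone = nf_ct_zone(ct);
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(net, zone, t);
	if (!h)
		return false;
	nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
	return true;
}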
*/ set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.timeout); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index f9d930f80276..b68ff15ed979 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct, new_state == SCTP_CONNTRACK_ESTABLISHED) { pr_debug("Setting assured bit\n"); set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_ASSURED, ct); } return NF_ACCEPT; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 3c96437b45ad..9dd8cd4fb6e6 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] = }; /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ -static int tcp_error(struct net *net, +static int tcp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, @@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct, after SYN_RECV or a valid answer for a picked up connection. */ set_bit(IPS_ASSURED_BIT, &ct->status); - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_ASSURED, ct); } nf_ct_refresh_acct(ct, ctinfo, skb, timeout); diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 5c5518bedb4b..8289088b8218 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); @@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } -static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info *ctinfo, +static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, + unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 458655bb2106..263b5a72588d 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct, nf_ct_udplite_timeout_stream); /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) - nf_conntrack_event_cache(IPCT_STATUS, ct); + nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); @@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } -static int udplite_error(struct net *net, +static int udplite_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, diff --git a/net/netfilter/nf_conntrack_sip.c 
b/net/netfilter/nf_conntrack_sip.c index 023966b569bf..8dd75d90efc0 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -16,12 +16,14 @@ #include <linux/inet.h> #include <linux/in.h> #include <linux/udp.h> +#include <linux/tcp.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> +#include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_sip.h> MODULE_LICENSE("GPL"); @@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600); MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " "endpoints only (default 1)"); -unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, +unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sip_hook); +void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly; +EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook); + unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, + unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *exp, @@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, unsigned int matchlen) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); -unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, +unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, - unsigned int dataoff, unsigned int *datalen, + unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, const union nf_inet_addr *addr) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); -unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, +unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, @@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, - const char **dptr, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int sdpoff, const union nf_inet_addr *addr) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); -unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, +unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *rtp_exp, @@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct, return 0; /* Find SIP URI */ - limit -= strlen("sip:"); - for (; dptr < limit; dptr++) { + for (; dptr < limit - strlen("sip:"); dptr++) { if (*dptr == '\r' || *dptr == '\n') return -1; - if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) + if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) { + dptr += strlen("sip:"); break; + } } if (!skp_epaddr_len(ct, dptr, limit, &shift)) return 0; @@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = { [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len), [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len), [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len), - [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len), + [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len), + [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len), [SIP_HDR_EXPIRES] = 
SIP_HDR("Expires", NULL, NULL, digits_len), [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len), }; @@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, } EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); +static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + const char *name, + unsigned int *matchoff, unsigned int *matchlen) +{ + const char *limit = dptr + datalen; + const char *start; + const char *end; + + limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(",")); + if (!limit) + limit = dptr + datalen; + + start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name)); + if (!start) + return 0; + start += strlen(name); + + end = ct_sip_header_search(start, limit, ";", strlen(";")); + if (!end) + end = limit; + + *matchoff = start - dptr; + *matchlen = end - start; + return 1; +} + /* Parse address from header parameter and return address, offset and length */ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, @@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, } EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); +static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, + unsigned int dataoff, unsigned int datalen, + u8 *proto) +{ + unsigned int matchoff, matchlen; + + if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=", + &matchoff, &matchlen)) { + if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP"))) + *proto = IPPROTO_TCP; + else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP"))) + *proto = IPPROTO_UDP; + else + return 0; + + if (*proto != nf_ct_protonum(ct)) + return 0; + } else + *proto = nf_ct_protonum(ct); + + return 1; +} + /* SDP header parsing: a SDP session description contains an ordered set of * headers, starting with a section containing general session parameters, * optionally followed by multiple media descriptions. 
@@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, static int refresh_signalling_expectation(struct nf_conn *ct, union nf_inet_addr *addr, - __be16 port, + u8 proto, __be16 port, unsigned int expires) { struct nf_conn_help *help = nfct_help(ct); @@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct, hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { if (exp->class != SIP_EXPECT_SIGNALLING || !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || + exp->tuple.dst.protonum != proto || exp->tuple.dst.u.udp.port != port) continue; if (!del_timer(&exp->timeout)) @@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media) spin_unlock_bh(&nf_conntrack_lock); } -static int set_expected_rtp_rtcp(struct sk_buff *skb, +static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, union nf_inet_addr *daddr, __be16 port, enum sip_expectation_classes class, @@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, rcu_read_lock(); do { - exp = __nf_ct_expect_find(net, &tuple); + exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); if (!exp || exp->master == ct || nfct_help(exp->master)->helper != nfct_help(ct)->helper || @@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, if (direct_rtp) { nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); if (nf_nat_sdp_port && - !nf_nat_sdp_port(skb, dptr, datalen, + !nf_nat_sdp_port(skb, dataoff, dptr, datalen, mediaoff, medialen, ntohs(rtp_port))) goto err1; } @@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) - ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp, + ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen, + rtp_exp, rtcp_exp, mediaoff, medialen, daddr); else { if (nf_ct_expect_related(rtp_exp) == 0) { @@ -847,6 +908,7 @@ err1: static const struct sdp_media_type sdp_media_types[] = { SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO), SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO), + SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE), }; static const struct sdp_media_type *sdp_media_type(const char *dptr, @@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr, return NULL; } -static int process_sdp(struct sk_buff *skb, +static int process_sdp(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_help *help = nfct_help(ct); unsigned int matchoff, matchlen; unsigned int mediaoff, medialen; unsigned int sdpoff; @@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb, else return NF_DROP; - ret = set_expected_rtp_rtcp(skb, dptr, datalen, + ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen, &rtp_addr, htons(port), t->class, mediaoff, medialen); if (ret != NF_ACCEPT) @@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb, /* Update media connection address if present */ if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { - ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen, - c_hdr, SDP_HDR_MEDIA, &rtp_addr); + ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen, + mediaoff, c_hdr, SDP_HDR_MEDIA, + &rtp_addr); if (ret != NF_ACCEPT) return ret; } @@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff 
*skb, /* Update session connection and owner addresses */ nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) - ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr); - - if (ret == NF_ACCEPT && i > 0) - help->help.ct_sip_info.invite_cseq = cseq; + ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff, + &rtp_addr); return ret; } -static int process_invite_response(struct sk_buff *skb, +static int process_invite_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { @@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb, if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) - return process_sdp(skb, dptr, datalen, cseq); + return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } -static int process_update_response(struct sk_buff *skb, +static int process_update_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { @@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb, if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) - return process_sdp(skb, dptr, datalen, cseq); + return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } -static int process_prack_response(struct sk_buff *skb, +static int process_prack_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { @@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb, if ((code >= 100 && code <= 199) || (code >= 200 && code <= 299)) - return process_sdp(skb, dptr, datalen, cseq); + return process_sdp(skb, dataoff, dptr, datalen, cseq); else if (help->help.ct_sip_info.invite_cseq == cseq) flush_expectations(ct, true); return NF_ACCEPT; } -static int process_bye_request(struct sk_buff *skb, +static int process_invite_request(struct sk_buff *skb, unsigned int dataoff, + const char **dptr, unsigned int *datalen, + unsigned int cseq) +{ + enum ip_conntrack_info ctinfo; + struct nf_conn *ct = nf_ct_get(skb, &ctinfo); + struct nf_conn_help *help = nfct_help(ct); + unsigned int ret; + + flush_expectations(ct, true); + ret = process_sdp(skb, dataoff, dptr, datalen, cseq); + if (ret == NF_ACCEPT) + help->help.ct_sip_info.invite_cseq = cseq; + return ret; +} + +static int process_bye_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { @@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb, * signalling connections. The expectation is marked inactive and is activated * when receiving a response indicating success from the registrar. 
*/ -static int process_register_request(struct sk_buff *skb, +static int process_register_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq) { @@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb, struct nf_conntrack_expect *exp; union nf_inet_addr *saddr, daddr; __be16 port; + u8 proto; unsigned int expires = 0; int ret; typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect; @@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb, if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) return NF_ACCEPT; + if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen, + &proto) == 0) + return NF_ACCEPT; + if (ct_sip_parse_numerical_param(ct, *dptr, matchoff + matchlen, *datalen, "expires=", NULL, NULL, &expires) < 0) @@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb, saddr = &ct->tuplehash[!dir].tuple.src.u3; nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), - saddr, &daddr, IPPROTO_UDP, NULL, &port); + saddr, &daddr, proto, NULL, &port); exp->timeout.expires = sip_timeout * HZ; exp->helper = nfct_help(ct)->helper; exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) - ret = nf_nat_sip_expect(skb, dptr, datalen, exp, + ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp, matchoff, matchlen); else { if (nf_ct_expect_related(exp) != 0) @@ -1116,7 +1197,7 @@ store_cseq: return ret; } -static int process_register_response(struct sk_buff *skb, +static int process_register_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int cseq, unsigned int code) { @@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb, enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); union nf_inet_addr addr; __be16 port; - unsigned int matchoff, matchlen, dataoff = 0; + u8 proto; + unsigned int matchoff, matchlen, coff = 0; unsigned int expires = 0; int in_contact = 0, ret; @@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb, while (1) { unsigned int c_expires = expires; - ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, + ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, SIP_HDR_CONTACT, &in_contact, &matchoff, &matchlen, &addr, &port); @@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb, if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) continue; + if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, + *datalen, &proto) == 0) + continue; + ret = ct_sip_parse_numerical_param(ct, *dptr, matchoff + matchlen, *datalen, "expires=", @@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb, return NF_DROP; if (c_expires == 0) break; - if (refresh_signalling_expectation(ct, &addr, port, c_expires)) + if (refresh_signalling_expectation(ct, &addr, proto, port, + c_expires)) return NF_ACCEPT; } @@ -1184,7 +1271,7 @@ flush: } static const struct sip_handler sip_handlers[] = { - SIP_HANDLER("INVITE", process_sdp, process_invite_response), + SIP_HANDLER("INVITE", process_invite_request, process_invite_response), SIP_HANDLER("UPDATE", process_sdp, process_update_response), SIP_HANDLER("ACK", process_sdp, NULL), SIP_HANDLER("PRACK", process_sdp, process_prack_response), @@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = { 
SIP_HANDLER("REGISTER", process_register_request, process_register_response), }; -static int process_sip_response(struct sk_buff *skb, +static int process_sip_response(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - unsigned int matchoff, matchlen; - unsigned int code, cseq, dataoff, i; + unsigned int matchoff, matchlen, matchend; + unsigned int code, cseq, i; if (*datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; @@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb, cseq = simple_strtoul(*dptr + matchoff, NULL, 10); if (!cseq) return NF_DROP; - dataoff = matchoff + matchlen + 1; + matchend = matchoff + matchlen + 1; for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { const struct sip_handler *handler; @@ -1220,15 +1307,16 @@ static int process_sip_response(struct sk_buff *skb, handler = &sip_handlers[i]; if (handler->response == NULL) continue; - if (*datalen < dataoff + handler->len || - strnicmp(*dptr + dataoff, handler->method, handler->len)) + if (*datalen < matchend + handler->len || + strnicmp(*dptr + matchend, handler->method, handler->len)) continue; - return handler->response(skb, dptr, datalen, cseq, code); + return handler->response(skb, dataoff, dptr, datalen, + cseq, code); } return NF_ACCEPT; } -static int process_sip_request(struct sk_buff *skb, +static int process_sip_request(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; @@ -1253,69 +1341,157 @@ static int process_sip_request(struct sk_buff *skb, if (!cseq) return NF_DROP; - return handler->request(skb, dptr, datalen, cseq); + return handler->request(skb, dataoff, dptr, datalen, cseq); } return NF_ACCEPT; } -static int sip_help(struct sk_buff *skb, - unsigned int protoff, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo) +static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct, + unsigned int dataoff, const char **dptr, + unsigned int *datalen) +{ + typeof(nf_nat_sip_hook) nf_nat_sip; + int ret; + + if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) + ret = process_sip_request(skb, dataoff, dptr, datalen); + else + ret = process_sip_response(skb, dataoff, dptr, datalen); + + if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { + nf_nat_sip = rcu_dereference(nf_nat_sip_hook); + if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen)) + ret = NF_DROP; + } + + return ret; +} + +static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, + struct nf_conn *ct, enum ip_conntrack_info ctinfo) { + struct tcphdr *th, _tcph; unsigned int dataoff, datalen; - const char *dptr; + unsigned int matchoff, matchlen, clen; + unsigned int msglen, origlen; + const char *dptr, *end; + s16 diff, tdiff = 0; int ret; - typeof(nf_nat_sip_hook) nf_nat_sip; + typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; + + if (ctinfo != IP_CT_ESTABLISHED && + ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) + return NF_ACCEPT; /* No Data ? 
*/ - dataoff = protoff + sizeof(struct udphdr); + th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); + if (th == NULL) + return NF_ACCEPT; + dataoff = protoff + th->doff * 4; if (dataoff >= skb->len) return NF_ACCEPT; nf_ct_refresh(ct, skb, sip_timeout * HZ); - if (!skb_is_nonlinear(skb)) - dptr = skb->data + dataoff; - else { + if (skb_is_nonlinear(skb)) { pr_debug("Copy of skbuff not supported yet.\n"); return NF_ACCEPT; } + dptr = skb->data + dataoff; datalen = skb->len - dataoff; if (datalen < strlen("SIP/2.0 200")) return NF_ACCEPT; - if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) - ret = process_sip_request(skb, &dptr, &datalen); - else - ret = process_sip_response(skb, &dptr, &datalen); + while (1) { + if (ct_sip_get_header(ct, dptr, 0, datalen, + SIP_HDR_CONTENT_LENGTH, + &matchoff, &matchlen) <= 0) + break; + + clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); + if (dptr + matchoff == end) + break; + + if (end + strlen("\r\n\r\n") > dptr + datalen) + break; + if (end[0] != '\r' || end[1] != '\n' || + end[2] != '\r' || end[3] != '\n') + break; + end += strlen("\r\n\r\n") + clen; + + msglen = origlen = end - dptr; + + ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); + if (ret != NF_ACCEPT) + break; + diff = msglen - origlen; + tdiff += diff; + + dataoff += msglen; + dptr += msglen; + datalen = datalen + diff - msglen; + } if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { - nf_nat_sip = rcu_dereference(nf_nat_sip_hook); - if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen)) - ret = NF_DROP; + nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook); + if (nf_nat_sip_seq_adjust) + nf_nat_sip_seq_adjust(skb, tdiff); } return ret; } -static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; -static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; +static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, + struct nf_conn *ct, enum ip_conntrack_info ctinfo) +{ + unsigned int dataoff, datalen; + const char *dptr; + + /* No Data ? 
*/ + dataoff = protoff + sizeof(struct udphdr); + if (dataoff >= skb->len) + return NF_ACCEPT; + + nf_ct_refresh(ct, skb, sip_timeout * HZ); + + if (skb_is_nonlinear(skb)) { + pr_debug("Copy of skbuff not supported yet.\n"); + return NF_ACCEPT; + } + + dptr = skb->data + dataoff; + datalen = skb->len - dataoff; + if (datalen < strlen("SIP/2.0 200")) + return NF_ACCEPT; + + return process_sip_msg(skb, ct, dataoff, &dptr, &datalen); +} + +static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; +static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly; static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { [SIP_EXPECT_SIGNALLING] = { + .name = "signalling", .max_expected = 1, .timeout = 3 * 60, }, [SIP_EXPECT_AUDIO] = { + .name = "audio", .max_expected = 2 * IP_CT_DIR_MAX, .timeout = 3 * 60, }, [SIP_EXPECT_VIDEO] = { + .name = "video", .max_expected = 2 * IP_CT_DIR_MAX, .timeout = 3 * 60, }, + [SIP_EXPECT_IMAGE] = { + .name = "image", + .max_expected = IP_CT_DIR_MAX, + .timeout = 3 * 60, + }, }; static void nf_conntrack_sip_fini(void) @@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void) int i, j; for (i = 0; i < ports_c; i++) { - for (j = 0; j < 2; j++) { + for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { if (sip[i][j].me == NULL) continue; nf_conntrack_helper_unregister(&sip[i][j]); @@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void) memset(&sip[i], 0, sizeof(sip[i])); sip[i][0].tuple.src.l3num = AF_INET; - sip[i][1].tuple.src.l3num = AF_INET6; - for (j = 0; j < 2; j++) { - sip[i][j].tuple.dst.protonum = IPPROTO_UDP; + sip[i][0].tuple.dst.protonum = IPPROTO_UDP; + sip[i][0].help = sip_help_udp; + sip[i][1].tuple.src.l3num = AF_INET; + sip[i][1].tuple.dst.protonum = IPPROTO_TCP; + sip[i][1].help = sip_help_tcp; + + sip[i][2].tuple.src.l3num = AF_INET6; + sip[i][2].tuple.dst.protonum = IPPROTO_UDP; + sip[i][2].help = sip_help_udp; + sip[i][3].tuple.src.l3num = AF_INET6; + sip[i][3].tuple.dst.protonum = IPPROTO_TCP; + sip[i][3].help = sip_help_tcp; + + for (j = 0; j < ARRAY_SIZE(sip[i]); j++) { sip[i][j].tuple.src.u.udp.port = htons(ports[i]); sip[i][j].expect_policy = sip_exp_policy; sip[i][j].expect_class_max = SIP_EXPECT_MAX; sip[i][j].me = THIS_MODULE; - sip[i][j].help = sip_help; tmpname = &sip_names[i][j][0]; if (ports[i] == SIP_PORT) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index e310f1561bb2..24a42efe62ef 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -26,6 +26,7 @@ #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_acct.h> +#include <net/netfilter/nf_conntrack_zones.h> MODULE_LICENSE("GPL"); @@ -171,6 +172,11 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; #endif +#ifdef CONFIG_NF_CONNTRACK_ZONES + if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) + goto release; +#endif + if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) goto release; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index eedc0c1ac7a4..8eb0cc23ada3 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); static char __initdata nfversion[] = "0.30"; -static struct sock *nfnl = NULL; static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; static DEFINE_MUTEX(nfnl_mutex); @@ -101,34 +100,35 @@ 
nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) return &ss->cb[cb_id]; } -int nfnetlink_has_listeners(unsigned int group) +int nfnetlink_has_listeners(struct net *net, unsigned int group) { - return netlink_has_listeners(nfnl, group); + return netlink_has_listeners(net->nfnl, group); } EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); -int nfnetlink_send(struct sk_buff *skb, u32 pid, +int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo, gfp_t flags) { - return nlmsg_notify(nfnl, skb, pid, group, echo, flags); + return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); } EXPORT_SYMBOL_GPL(nfnetlink_send); -void nfnetlink_set_err(u32 pid, u32 group, int error) +void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) { - netlink_set_err(nfnl, pid, group, error); + netlink_set_err(net->nfnl, pid, group, error); } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags) { - return netlink_unicast(nfnl, skb, pid, flags); + return netlink_unicast(net->nfnl, skb, pid, flags); } EXPORT_SYMBOL_GPL(nfnetlink_unicast); /* Process one complete nfnetlink message. */ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { + struct net *net = sock_net(skb->sk); const struct nfnl_callback *nc; const struct nfnetlink_subsystem *ss; int type, err; @@ -170,7 +170,7 @@ replay: if (err < 0) return err; - err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda); + err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); if (err == -EAGAIN) goto replay; return err; @@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb) nfnl_unlock(); } -static void __exit nfnetlink_exit(void) +static int __net_init nfnetlink_net_init(struct net *net) { - printk("Removing netfilter NETLINK layer.\n"); - netlink_kernel_release(nfnl); - return; + struct sock *nfnl; + + nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX, + nfnetlink_rcv, NULL, THIS_MODULE); + if (!nfnl) + return -ENOMEM; + net->nfnl_stash = nfnl; + rcu_assign_pointer(net->nfnl, nfnl); + return 0; } -static int __init nfnetlink_init(void) +static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list) { - printk("Netfilter messages via NETLINK v%s.\n", nfversion); + struct net *net; - nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX, - nfnetlink_rcv, NULL, THIS_MODULE); - if (!nfnl) { - printk(KERN_ERR "cannot initialize nfnetlink!\n"); - return -ENOMEM; - } + list_for_each_entry(net, net_exit_list, exit_list) + rcu_assign_pointer(net->nfnl, NULL); + synchronize_net(); + list_for_each_entry(net, net_exit_list, exit_list) + netlink_kernel_release(net->nfnl_stash); +} - return 0; +static struct pernet_operations nfnetlink_net_ops = { + .init = nfnetlink_net_init, + .exit_batch = nfnetlink_net_exit_batch, +}; + +static int __init nfnetlink_init(void) +{ + printk("Netfilter messages via NETLINK v%s.\n", nfversion); + return register_pernet_subsys(&nfnetlink_net_ops); } +static void __exit nfnetlink_exit(void) +{ + printk("Removing netfilter NETLINK layer.\n"); + unregister_pernet_subsys(&nfnetlink_net_ops); +} module_init(nfnetlink_init); module_exit(nfnetlink_exit); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 9de0470d557e..285e9029a9ff 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -323,7 
+323,8 @@ __nfulnl_send(struct nfulnl_instance *inst) NLMSG_DONE, sizeof(struct nfgenmsg)); - status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); + status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid, + MSG_DONTWAIT); inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7e3fa410641e..7ba4abc405c9 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -112,7 +112,6 @@ instance_create(u_int16_t queue_num, int pid) inst->copy_mode = NFQNL_COPY_NONE; spin_lock_init(&inst->lock); INIT_LIST_HEAD(&inst->queue_list); - INIT_RCU_HEAD(&inst->rcu); if (!try_module_get(THIS_MODULE)) { err = -EAGAIN; @@ -414,13 +413,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) queue->queue_dropped++; if (net_ratelimit()) printk(KERN_WARNING "nf_queue: full at %d entries, " - "dropping packets(s). Dropped: %d\n", - queue->queue_total, queue->queue_dropped); + "dropping packets(s).\n", + queue->queue_total); goto err_out_free_nskb; } /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); if (err < 0) { queue->queue_user_dropped++; goto err_out_unlock; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index f01955cce314..0a12cedfe9e3 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -26,7 +26,9 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp.h> - +#include <linux/netfilter_ipv4/ip_tables.h> +#include <linux/netfilter_ipv6/ip6_tables.h> +#include <linux/netfilter_arp/arp_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); @@ -37,7 +39,7 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); struct compat_delta { struct compat_delta *next; unsigned int offset; - short delta; + int delta; }; struct xt_af { @@ -364,8 +366,10 @@ int xt_check_match(struct xt_mtchk_param *par, * ebt_among is exempt from centralized matchsize checking * because it uses a dynamic-size data set. 
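With the nfnetlink hunks above there is no longer a single global nfnl socket: each struct net gets its own through the registered pernet_operations, and every caller must say which namespace it is talking to. A brief editorial sketch of how a caller now picks the right net before building an event; the function name is hypothetical:

#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nfnetlink.h>

/* Editorial sketch, not part of the patch: event paths derive the
 * namespace from the object (nf_ct_net()), request paths from the
 * netlink socket the message arrived on (sock_net()). */
static bool example_should_build_event(const struct nf_conn *ct,
				       unsigned int group)
{
	struct net *net = nf_ct_net(ct);

	return nfnetlink_has_listeners(net, group);
}

As the hunks above also show, nfnetlink_log and nfnetlink_queue keep passing &init_net explicitly, since their instances are not yet per-namespace.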
*/ - pr_err("%s_tables: %s match: invalid size %Zu != %u\n", + pr_err("%s_tables: %s.%u match: invalid size " + "%u (kernel) != (user) %u\n", xt_prefix[par->family], par->match->name, + par->match->revision, XT_ALIGN(par->match->matchsize), size); return -EINVAL; } @@ -435,10 +439,10 @@ void xt_compat_flush_offsets(u_int8_t af) } EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); -short xt_compat_calc_jump(u_int8_t af, unsigned int offset) +int xt_compat_calc_jump(u_int8_t af, unsigned int offset) { struct compat_delta *tmp; - short delta; + int delta; for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) if (tmp->offset < offset) @@ -481,8 +485,8 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, } EXPORT_SYMBOL_GPL(xt_compat_match_from_user); -int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, - unsigned int *size) +int xt_compat_match_to_user(const struct xt_entry_match *m, + void __user **dstptr, unsigned int *size) { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match __user *cm = *dstptr; @@ -514,8 +518,10 @@ int xt_check_target(struct xt_tgchk_param *par, unsigned int size, u_int8_t proto, bool inv_proto) { if (XT_ALIGN(par->target->targetsize) != size) { - pr_err("%s_tables: %s target: invalid size %Zu != %u\n", + pr_err("%s_tables: %s.%u target: invalid size " + "%u (kernel) != (user) %u\n", xt_prefix[par->family], par->target->name, + par->target->revision, XT_ALIGN(par->target->targetsize), size); return -EINVAL; } @@ -582,8 +588,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, } EXPORT_SYMBOL_GPL(xt_compat_target_from_user); -int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, - unsigned int *size) +int xt_compat_target_to_user(const struct xt_entry_target *t, + void __user **dstptr, unsigned int *size) { const struct xt_target *target = t->u.kernel.target; struct compat_xt_entry_target __user *ct = *dstptr; @@ -1091,6 +1097,60 @@ static const struct file_operations xt_target_ops = { #endif /* CONFIG_PROC_FS */ +/** + * xt_hook_link - set up hooks for a new table + * @table: table with metadata needed to set up hooks + * @fn: Hook function + * + * This function will take care of creating and registering the necessary + * Netfilter hooks for XT tables. 
+ */ +struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn) +{ + unsigned int hook_mask = table->valid_hooks; + uint8_t i, num_hooks = hweight32(hook_mask); + uint8_t hooknum; + struct nf_hook_ops *ops; + int ret; + + ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); + if (ops == NULL) + return ERR_PTR(-ENOMEM); + + for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; + hook_mask >>= 1, ++hooknum) { + if (!(hook_mask & 1)) + continue; + ops[i].hook = fn; + ops[i].owner = table->me; + ops[i].pf = table->af; + ops[i].hooknum = hooknum; + ops[i].priority = table->priority; + ++i; + } + + ret = nf_register_hooks(ops, num_hooks); + if (ret < 0) { + kfree(ops); + return ERR_PTR(ret); + } + + return ops; +} +EXPORT_SYMBOL_GPL(xt_hook_link); + +/** + * xt_hook_unlink - remove hooks for a table + * @ops: nf_hook_ops array as returned by nf_hook_link + * @hook_mask: the very same mask that was passed to nf_hook_link + */ +void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops) +{ + nf_unregister_hooks(ops, hweight32(table->valid_hooks)); + kfree(ops); +} +EXPORT_SYMBOL_GPL(xt_hook_unlink); + int xt_proto_init(struct net *net, u_int8_t af) { #ifdef CONFIG_PROC_FS diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c new file mode 100644 index 000000000000..61c50fa84703 --- /dev/null +++ b/net/netfilter/xt_CT.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2010 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/selinux.h> +#include <linux/netfilter_ipv4/ip_tables.h> +#include <linux/netfilter_ipv6/ip6_tables.h> +#include <linux/netfilter/x_tables.h> +#include <linux/netfilter/xt_CT.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_helper.h> +#include <net/netfilter/nf_conntrack_ecache.h> +#include <net/netfilter/nf_conntrack_zones.h> + +static unsigned int xt_ct_target(struct sk_buff *skb, + const struct xt_target_param *par) +{ + const struct xt_ct_target_info *info = par->targinfo; + struct nf_conn *ct = info->ct; + + /* Previously seen (loopback)? Ignore. 
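xt_hook_link()/xt_hook_unlink() above centralise the hook registration boilerplate that each *_tables module previously open-coded from its valid_hooks mask. A hedged sketch of how a table module is expected to use the pair; the wrapper names are placeholders:

#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>

/* Editorial sketch, not part of the patch. */
static struct nf_hook_ops *example_ops __read_mostly;

static int example_table_hook_up(const struct xt_table *table, nf_hookfn *fn)
{
	example_ops = xt_hook_link(table, fn);
	return IS_ERR(example_ops) ? PTR_ERR(example_ops) : 0;
}

static void example_table_hook_down(const struct xt_table *table)
{
	xt_hook_unlink(table, example_ops);
}

Note that the kerneldoc for xt_hook_unlink() above still mentions a hook_mask parameter the function no longer takes; the mask is recomputed from table->valid_hooks.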
*/ + if (skb->nfct != NULL) + return XT_CONTINUE; + + atomic_inc(&ct->ct_general.use); + skb->nfct = &ct->ct_general; + skb->nfctinfo = IP_CT_NEW; + + return XT_CONTINUE; +} + +static u8 xt_ct_find_proto(const struct xt_tgchk_param *par) +{ + if (par->family == AF_INET) { + const struct ipt_entry *e = par->entryinfo; + + if (e->ip.invflags & IPT_INV_PROTO) + return 0; + return e->ip.proto; + } else if (par->family == AF_INET6) { + const struct ip6t_entry *e = par->entryinfo; + + if (e->ipv6.invflags & IP6T_INV_PROTO) + return 0; + return e->ipv6.proto; + } else + return 0; +} + +static bool xt_ct_tg_check(const struct xt_tgchk_param *par) +{ + struct xt_ct_target_info *info = par->targinfo; + struct nf_conntrack_tuple t; + struct nf_conn_help *help; + struct nf_conn *ct; + u8 proto; + + if (info->flags & ~XT_CT_NOTRACK) + return false; + + if (info->flags & XT_CT_NOTRACK) { + ct = &nf_conntrack_untracked; + atomic_inc(&ct->ct_general.use); + goto out; + } + +#ifndef CONFIG_NF_CONNTRACK_ZONES + if (info->zone) + goto err1; +#endif + + if (nf_ct_l3proto_try_module_get(par->family) < 0) + goto err1; + + memset(&t, 0, sizeof(t)); + ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); + if (IS_ERR(ct)) + goto err2; + + if ((info->ct_events || info->exp_events) && + !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events, + GFP_KERNEL)) + goto err3; + + if (info->helper[0]) { + proto = xt_ct_find_proto(par); + if (!proto) + goto err3; + + help = nf_ct_helper_ext_add(ct, GFP_KERNEL); + if (help == NULL) + goto err3; + + help->helper = nf_conntrack_helper_try_module_get(info->helper, + par->family, + proto); + if (help->helper == NULL) + goto err3; + } + + __set_bit(IPS_TEMPLATE_BIT, &ct->status); + __set_bit(IPS_CONFIRMED_BIT, &ct->status); +out: + info->ct = ct; + return true; + +err3: + nf_conntrack_free(ct); +err2: + nf_ct_l3proto_module_put(par->family); +err1: + return false; +} + +static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par) +{ + struct xt_ct_target_info *info = par->targinfo; + struct nf_conn *ct = info->ct; + struct nf_conn_help *help; + + if (ct != &nf_conntrack_untracked) { + help = nfct_help(ct); + if (help) + module_put(help->helper->me); + + nf_ct_l3proto_module_put(par->family); + } + nf_ct_put(info->ct); +} + +static struct xt_target xt_ct_tg __read_mostly = { + .name = "CT", + .family = NFPROTO_UNSPEC, + .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)), + .checkentry = xt_ct_tg_check, + .destroy = xt_ct_tg_destroy, + .target = xt_ct_target, + .table = "raw", + .me = THIS_MODULE, +}; + +static int __init xt_ct_tg_init(void) +{ + return xt_register_target(&xt_ct_tg); +} + +static void __exit xt_ct_tg_exit(void) +{ + xt_unregister_target(&xt_ct_tg); +} + +module_init(xt_ct_tg_init); +module_exit(xt_ct_tg_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Xtables: connection tracking target"); +MODULE_ALIAS("ipt_CT"); +MODULE_ALIAS("ip6t_CT"); diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index f28f6a5fc02d..12dcd7007c3e 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c @@ -28,6 +28,7 @@ MODULE_ALIAS("ip6t_NFQUEUE"); MODULE_ALIAS("arpt_NFQUEUE"); static u32 jhash_initval __read_mostly; +static bool rnd_inited __read_mostly; static unsigned int nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) @@ -90,6 +91,10 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par) const struct xt_NFQ_info_v1 *info = par->targinfo; u32 maxid; + if (unlikely(!rnd_inited)) { + 
get_random_bytes(&jhash_initval, sizeof(jhash_initval)); + rnd_inited = true; + } if (info->queues_total == 0) { pr_err("NFQUEUE: number of total queues is 0\n"); return false; @@ -135,7 +140,6 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = { static int __init nfqueue_tg_init(void) { - get_random_bytes(&jhash_initval, sizeof(jhash_initval)); return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); } diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index d80b8192e0d4..87ae97e5516f 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -23,6 +23,7 @@ static DEFINE_MUTEX(xt_rateest_mutex); #define RATEEST_HSIZE 16 static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly; static unsigned int jhash_rnd __read_mostly; +static bool rnd_inited __read_mostly; static unsigned int xt_rateest_hash(const char *name) { @@ -93,6 +94,11 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) struct gnet_estimator est; } cfg; + if (unlikely(!rnd_inited)) { + get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); + rnd_inited = true; + } + est = xt_rateest_lookup(info->name); if (est) { /* @@ -164,7 +170,6 @@ static int __init xt_rateest_tg_init(void) for (i = 0; i < ARRAY_SIZE(rateest_hash); i++) INIT_HLIST_HEAD(&rateest_hash[i]); - get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); return xt_register_target(&xt_rateest_tg_reg); } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index eda64c1cb1e5..6f21b4377dbb 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -60,17 +60,9 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcplen = skb->len - tcphoff; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); - /* Since it passed flags test in tcp match, we know it is is - not a fragment, and has data >= tcp header length. SYN - packets should not contain data: if they did, then we risk - running over MTU, sending Frag Needed and breaking things - badly. --RR */ - if (tcplen != tcph->doff*4) { - if (net_ratelimit()) - printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n", - skb->len); + /* Header cannot be larger than the packet */ + if (tcplen < tcph->doff*4) return -1; - } if (info->mss == XT_TCPMSS_CLAMP_PMTU) { if (dst_mtu(skb_dst(skb)) <= minlen) { @@ -115,6 +107,12 @@ tcpmss_mangle_packet(struct sk_buff *skb, } } + /* There is data after the header so the option can't be added + without moving it, and doing so may make the SYN packet + itself too large. Accept the packet unmodified instead. */ + if (tcplen > tcph->doff*4) + return 0; + /* * MSS Option not found ?! add it.. 
*/ diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 38f03f75a636..26997ce90e48 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -28,6 +28,7 @@ #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_tuple.h> +#include <net/netfilter/nf_conntrack_zones.h> /* we will save the tuples of all connections we care about */ struct xt_connlimit_conn { @@ -40,15 +41,11 @@ struct xt_connlimit_data { spinlock_t lock; }; -static u_int32_t connlimit_rnd; -static bool connlimit_rnd_inited; +static u_int32_t connlimit_rnd __read_mostly; +static bool connlimit_rnd_inited __read_mostly; static inline unsigned int connlimit_iphash(__be32 addr) { - if (unlikely(!connlimit_rnd_inited)) { - get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd)); - connlimit_rnd_inited = true; - } return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF; } @@ -59,11 +56,6 @@ connlimit_iphash6(const union nf_inet_addr *addr, union nf_inet_addr res; unsigned int i; - if (unlikely(!connlimit_rnd_inited)) { - get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd)); - connlimit_rnd_inited = true; - } - for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) res.ip6[i] = addr->ip6[i] & mask->ip6[i]; @@ -99,7 +91,8 @@ same_source_net(const union nf_inet_addr *addr, } } -static int count_them(struct xt_connlimit_data *data, +static int count_them(struct net *net, + struct xt_connlimit_data *data, const struct nf_conntrack_tuple *tuple, const union nf_inet_addr *addr, const union nf_inet_addr *mask, @@ -122,7 +115,8 @@ static int count_them(struct xt_connlimit_data *data, /* check the saved connections */ list_for_each_entry_safe(conn, tmp, hash, list) { - found = nf_conntrack_find_get(&init_net, &conn->tuple); + found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, + &conn->tuple); found_ct = NULL; if (found != NULL) @@ -180,6 +174,7 @@ static int count_them(struct xt_connlimit_data *data, static bool connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) { + struct net *net = dev_net(par->in ? 
par->in : par->out); const struct xt_connlimit_info *info = par->matchinfo; union nf_inet_addr addr; struct nf_conntrack_tuple tuple; @@ -204,7 +199,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) } spin_lock_bh(&info->data->lock); - connections = count_them(info->data, tuple_ptr, &addr, + connections = count_them(net, info->data, tuple_ptr, &addr, &info->mask, par->family); spin_unlock_bh(&info->data->lock); @@ -226,6 +221,10 @@ static bool connlimit_mt_check(const struct xt_mtchk_param *par) struct xt_connlimit_info *info = par->matchinfo; unsigned int i; + if (unlikely(!connlimit_rnd_inited)) { + get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd)); + connlimit_rnd_inited = true; + } if (nf_ct_l3proto_try_module_get(par->family) < 0) { printk(KERN_WARNING "cannot load conntrack support for " "address family %u\n", par->family); diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index dd16e404424f..e47fb805ffb4 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -26,6 +26,7 @@ #endif #include <net/net_namespace.h> +#include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> @@ -40,9 +41,19 @@ MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); MODULE_ALIAS("ipt_hashlimit"); MODULE_ALIAS("ip6t_hashlimit"); +struct hashlimit_net { + struct hlist_head htables; + struct proc_dir_entry *ipt_hashlimit; + struct proc_dir_entry *ip6t_hashlimit; +}; + +static int hashlimit_net_id; +static inline struct hashlimit_net *hashlimit_pernet(struct net *net) +{ + return net_generic(net, hashlimit_net_id); +} + /* need to declare this at the top */ -static struct proc_dir_entry *hashlimit_procdir4; -static struct proc_dir_entry *hashlimit_procdir6; static const struct file_operations dl_file_ops; /* hash table crap */ @@ -79,27 +90,26 @@ struct dsthash_ent { struct xt_hashlimit_htable { struct hlist_node node; /* global list of all htables */ - atomic_t use; + int use; u_int8_t family; + bool rnd_initialized; struct hashlimit_cfg1 cfg; /* config */ /* used internally */ spinlock_t lock; /* lock for list_head */ u_int32_t rnd; /* random seed for hash */ - int rnd_initialized; unsigned int count; /* number entries in table */ struct timer_list timer; /* timer for gc */ /* seq_file stuff */ struct proc_dir_entry *pde; + struct net *net; struct hlist_head hash[0]; /* hashtable itself */ }; -static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ -static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ -static HLIST_HEAD(hashlimit_htables); +static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */ static struct kmem_cache *hashlimit_cachep __read_mostly; static inline bool dst_cmp(const struct dsthash_ent *ent, @@ -150,7 +160,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht, * the first hashtable entry */ if (!ht->rnd_initialized) { get_random_bytes(&ht->rnd, sizeof(ht->rnd)); - ht->rnd_initialized = 1; + ht->rnd_initialized = true; } if (ht->cfg.max && ht->count >= ht->cfg.max) { @@ -185,8 +195,9 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) } static void htable_gc(unsigned long htlong); -static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) +static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family) { + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; unsigned int size; unsigned int i; 
@@ -232,33 +243,36 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) for (i = 0; i < hinfo->cfg.size; i++) INIT_HLIST_HEAD(&hinfo->hash[i]); - atomic_set(&hinfo->use, 1); + hinfo->use = 1; hinfo->count = 0; hinfo->family = family; - hinfo->rnd_initialized = 0; + hinfo->rnd_initialized = false; spin_lock_init(&hinfo->lock); hinfo->pde = proc_create_data(minfo->name, 0, (family == NFPROTO_IPV4) ? - hashlimit_procdir4 : hashlimit_procdir6, + hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit, &dl_file_ops, hinfo); if (!hinfo->pde) { vfree(hinfo); return -1; } + hinfo->net = net; setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo); hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); add_timer(&hinfo->timer); - spin_lock_bh(&hashlimit_lock); - hlist_add_head(&hinfo->node, &hashlimit_htables); - spin_unlock_bh(&hashlimit_lock); + mutex_lock(&hashlimit_mutex); + hlist_add_head(&hinfo->node, &hashlimit_net->htables); + mutex_unlock(&hashlimit_mutex); return 0; } -static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) +static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, + u_int8_t family) { + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; unsigned int size; unsigned int i; @@ -293,28 +307,29 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) for (i = 0; i < hinfo->cfg.size; i++) INIT_HLIST_HEAD(&hinfo->hash[i]); - atomic_set(&hinfo->use, 1); + hinfo->use = 1; hinfo->count = 0; hinfo->family = family; - hinfo->rnd_initialized = 0; + hinfo->rnd_initialized = false; spin_lock_init(&hinfo->lock); hinfo->pde = proc_create_data(minfo->name, 0, (family == NFPROTO_IPV4) ? - hashlimit_procdir4 : hashlimit_procdir6, + hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit, &dl_file_ops, hinfo); if (hinfo->pde == NULL) { vfree(hinfo); return -1; } + hinfo->net = net; setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); add_timer(&hinfo->timer); - spin_lock_bh(&hashlimit_lock); - hlist_add_head(&hinfo->node, &hashlimit_htables); - spin_unlock_bh(&hashlimit_lock); + mutex_lock(&hashlimit_mutex); + hlist_add_head(&hinfo->node, &hashlimit_net->htables); + mutex_unlock(&hashlimit_mutex); return 0; } @@ -364,43 +379,46 @@ static void htable_gc(unsigned long htlong) static void htable_destroy(struct xt_hashlimit_htable *hinfo) { + struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); + struct proc_dir_entry *parent; + del_timer_sync(&hinfo->timer); - /* remove proc entry */ - remove_proc_entry(hinfo->pde->name, - hinfo->family == NFPROTO_IPV4 ? 
hashlimit_procdir4 : - hashlimit_procdir6); + if (hinfo->family == NFPROTO_IPV4) + parent = hashlimit_net->ipt_hashlimit; + else + parent = hashlimit_net->ip6t_hashlimit; + remove_proc_entry(hinfo->pde->name, parent); htable_selective_cleanup(hinfo, select_all); vfree(hinfo); } -static struct xt_hashlimit_htable *htable_find_get(const char *name, +static struct xt_hashlimit_htable *htable_find_get(struct net *net, + const char *name, u_int8_t family) { + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; struct hlist_node *pos; - spin_lock_bh(&hashlimit_lock); - hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) { + hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) { if (!strcmp(name, hinfo->pde->name) && hinfo->family == family) { - atomic_inc(&hinfo->use); - spin_unlock_bh(&hashlimit_lock); + hinfo->use++; return hinfo; } } - spin_unlock_bh(&hashlimit_lock); return NULL; } static void htable_put(struct xt_hashlimit_htable *hinfo) { - if (atomic_dec_and_test(&hinfo->use)) { - spin_lock_bh(&hashlimit_lock); + mutex_lock(&hashlimit_mutex); + if (--hinfo->use == 0) { hlist_del(&hinfo->node); - spin_unlock_bh(&hashlimit_lock); htable_destroy(hinfo); } + mutex_unlock(&hashlimit_mutex); } /* The algorithm used is the Simple Token Bucket Filter (TBF) @@ -665,6 +683,7 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) { + struct net *net = par->net; struct xt_hashlimit_info *r = par->matchinfo; /* Check for overflow. */ @@ -687,25 +706,20 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) if (r->name[sizeof(r->name) - 1] != '\0') return false; - /* This is the best we've got: We cannot release and re-grab lock, - * since checkentry() is called before x_tables.c grabs xt_mutex. - * We also cannot grab the hashtable spinlock, since htable_create will - * call vmalloc, and that can sleep. And we cannot just re-search - * the list of htable's in htable_create(), since then we would - * create duplicate proc files. -HW */ - mutex_lock(&hlimit_mutex); - r->hinfo = htable_find_get(r->name, par->match->family); - if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) { - mutex_unlock(&hlimit_mutex); + mutex_lock(&hashlimit_mutex); + r->hinfo = htable_find_get(net, r->name, par->match->family); + if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) { + mutex_unlock(&hashlimit_mutex); return false; } - mutex_unlock(&hlimit_mutex); + mutex_unlock(&hashlimit_mutex); return true; } static bool hashlimit_mt_check(const struct xt_mtchk_param *par) { + struct net *net = par->net; struct xt_hashlimit_mtinfo1 *info = par->matchinfo; /* Check for overflow. */ @@ -728,19 +742,13 @@ static bool hashlimit_mt_check(const struct xt_mtchk_param *par) return false; } - /* This is the best we've got: We cannot release and re-grab lock, - * since checkentry() is called before x_tables.c grabs xt_mutex. - * We also cannot grab the hashtable spinlock, since htable_create will - * call vmalloc, and that can sleep. And we cannot just re-search - * the list of htable's in htable_create(), since then we would - * create duplicate proc files. 
-HW */ - mutex_lock(&hlimit_mutex); - info->hinfo = htable_find_get(info->name, par->match->family); - if (!info->hinfo && htable_create(info, par->match->family) != 0) { - mutex_unlock(&hlimit_mutex); + mutex_lock(&hashlimit_mutex); + info->hinfo = htable_find_get(net, info->name, par->match->family); + if (!info->hinfo && htable_create(net, info, par->match->family) != 0) { + mutex_unlock(&hashlimit_mutex); return false; } - mutex_unlock(&hlimit_mutex); + mutex_unlock(&hashlimit_mutex); return true; } @@ -767,7 +775,7 @@ struct compat_xt_hashlimit_info { compat_uptr_t master; }; -static void hashlimit_mt_compat_from_user(void *dst, void *src) +static void hashlimit_mt_compat_from_user(void *dst, const void *src) { int off = offsetof(struct compat_xt_hashlimit_info, hinfo); @@ -775,7 +783,7 @@ static void hashlimit_mt_compat_from_user(void *dst, void *src) memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); } -static int hashlimit_mt_compat_to_user(void __user *dst, void *src) +static int hashlimit_mt_compat_to_user(void __user *dst, const void *src) { int off = offsetof(struct compat_xt_hashlimit_info, hinfo); @@ -841,8 +849,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { static void *dl_seq_start(struct seq_file *s, loff_t *pos) __acquires(htable->lock) { - struct proc_dir_entry *pde = s->private; - struct xt_hashlimit_htable *htable = pde->data; + struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket; spin_lock_bh(&htable->lock); @@ -859,8 +866,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) { - struct proc_dir_entry *pde = s->private; - struct xt_hashlimit_htable *htable = pde->data; + struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; *pos = ++(*bucket); @@ -874,8 +880,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) static void dl_seq_stop(struct seq_file *s, void *v) __releases(htable->lock) { - struct proc_dir_entry *pde = s->private; - struct xt_hashlimit_htable *htable = pde->data; + struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; kfree(bucket); @@ -917,8 +922,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_show(struct seq_file *s, void *v) { - struct proc_dir_entry *pde = s->private; - struct xt_hashlimit_htable *htable = pde->data; + struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; struct dsthash_ent *ent; struct hlist_node *pos; @@ -944,7 +948,7 @@ static int dl_proc_open(struct inode *inode, struct file *file) if (!ret) { struct seq_file *sf = file->private_data; - sf->private = PDE(inode); + sf->private = PDE(inode)->data; } return ret; } @@ -957,10 +961,61 @@ static const struct file_operations dl_file_ops = { .release = seq_release }; +static int __net_init hashlimit_proc_net_init(struct net *net) +{ + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); + + hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net); + if (!hashlimit_net->ipt_hashlimit) + return -ENOMEM; +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) + hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); + if (!hashlimit_net->ip6t_hashlimit) { + proc_net_remove(net, "ipt_hashlimit"); + return -ENOMEM; + } +#endif + return 0; +} + +static void __net_exit hashlimit_proc_net_exit(struct net *net) +{ + 
proc_net_remove(net, "ipt_hashlimit"); +#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) + proc_net_remove(net, "ip6t_hashlimit"); +#endif +} + +static int __net_init hashlimit_net_init(struct net *net) +{ + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); + + INIT_HLIST_HEAD(&hashlimit_net->htables); + return hashlimit_proc_net_init(net); +} + +static void __net_exit hashlimit_net_exit(struct net *net) +{ + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); + + BUG_ON(!hlist_empty(&hashlimit_net->htables)); + hashlimit_proc_net_exit(net); +} + +static struct pernet_operations hashlimit_net_ops = { + .init = hashlimit_net_init, + .exit = hashlimit_net_exit, + .id = &hashlimit_net_id, + .size = sizeof(struct hashlimit_net), +}; + static int __init hashlimit_mt_init(void) { int err; + err = register_pernet_subsys(&hashlimit_net_ops); + if (err < 0) + return err; err = xt_register_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); if (err < 0) @@ -974,41 +1029,21 @@ static int __init hashlimit_mt_init(void) printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); goto err2; } - hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net); - if (!hashlimit_procdir4) { - printk(KERN_ERR "xt_hashlimit: unable to create proc dir " - "entry\n"); - goto err3; - } - err = 0; -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) - hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net); - if (!hashlimit_procdir6) { - printk(KERN_ERR "xt_hashlimit: unable to create proc dir " - "entry\n"); - err = -ENOMEM; - } -#endif - if (!err) - return 0; - remove_proc_entry("ipt_hashlimit", init_net.proc_net); -err3: - kmem_cache_destroy(hashlimit_cachep); + return 0; + err2: xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); err1: + unregister_pernet_subsys(&hashlimit_net_ops); return err; } static void __exit hashlimit_mt_exit(void) { - remove_proc_entry("ipt_hashlimit", init_net.proc_net); -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) - remove_proc_entry("ip6t_hashlimit", init_net.proc_net); -#endif kmem_cache_destroy(hashlimit_cachep); xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); + unregister_pernet_subsys(&hashlimit_net_ops); } module_init(hashlimit_mt_init); diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 2773be6a71dd..a0ca5339af41 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -148,7 +148,7 @@ struct compat_xt_rateinfo { /* To keep the full "prev" timestamp, the upper 32 bits are stored in the * master pointer, which does not need to be preserved. 
*/ -static void limit_mt_compat_from_user(void *dst, void *src) +static void limit_mt_compat_from_user(void *dst, const void *src) { const struct compat_xt_rateinfo *cm = src; struct xt_rateinfo m = { @@ -162,7 +162,7 @@ static void limit_mt_compat_from_user(void *dst, void *src) memcpy(dst, &m, sizeof(m)); } -static int limit_mt_compat_to_user(void __user *dst, void *src) +static int limit_mt_compat_to_user(void __user *dst, const void *src) { const struct xt_rateinfo *m = src; struct compat_xt_rateinfo cm = { diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 4d1a41bbd5d7..4169e200588d 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c @@ -334,7 +334,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb, if (info->flags & XT_OSF_LOG) nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, - "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n", + "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n", f->genre, f->version, f->subtype, &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest), @@ -349,7 +349,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb, if (!fcount && (info->flags & XT_OSF_LOG)) nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, - "Remote OS is not known: %pi4:%u -> %pi4:%u\n", + "Remote OS is not known: %pI4:%u -> %pI4:%u\n", &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest)); diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index fc70a49c0afd..132cfaa84cdc 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -28,6 +28,7 @@ #include <linux/skbuff.h> #include <linux/inet.h> #include <net/net_namespace.h> +#include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_recent.h> @@ -52,7 +53,7 @@ module_param(ip_list_perms, uint, 0400); module_param(ip_list_uid, uint, 0400); module_param(ip_list_gid, uint, 0400); MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); -MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP to remember (max. 255)"); +MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 
255)"); MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files"); @@ -78,37 +79,40 @@ struct recent_table { struct list_head iphash[0]; }; -static LIST_HEAD(tables); +struct recent_net { + struct list_head tables; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *xt_recent; +#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT + struct proc_dir_entry *ipt_recent; +#endif +#endif +}; + +static int recent_net_id; +static inline struct recent_net *recent_pernet(struct net *net) +{ + return net_generic(net, recent_net_id); +} + static DEFINE_SPINLOCK(recent_lock); static DEFINE_MUTEX(recent_mutex); #ifdef CONFIG_PROC_FS -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT -static struct proc_dir_entry *proc_old_dir; -#endif -static struct proc_dir_entry *recent_proc_dir; static const struct file_operations recent_old_fops, recent_mt_fops; #endif -static u_int32_t hash_rnd; -static bool hash_rnd_initted; +static u_int32_t hash_rnd __read_mostly; +static bool hash_rnd_inited __read_mostly; -static unsigned int recent_entry_hash4(const union nf_inet_addr *addr) +static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr) { - if (!hash_rnd_initted) { - get_random_bytes(&hash_rnd, sizeof(hash_rnd)); - hash_rnd_initted = true; - } return jhash_1word((__force u32)addr->ip, hash_rnd) & (ip_list_hash_size - 1); } -static unsigned int recent_entry_hash6(const union nf_inet_addr *addr) +static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr) { - if (!hash_rnd_initted) { - get_random_bytes(&hash_rnd, sizeof(hash_rnd)); - hash_rnd_initted = true; - } return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & (ip_list_hash_size - 1); } @@ -180,11 +184,12 @@ static void recent_entry_update(struct recent_table *t, struct recent_entry *e) list_move_tail(&e->lru_list, &t->lru_list); } -static struct recent_table *recent_table_lookup(const char *name) +static struct recent_table *recent_table_lookup(struct recent_net *recent_net, + const char *name) { struct recent_table *t; - list_for_each_entry(t, &tables, list) + list_for_each_entry(t, &recent_net->tables, list) if (!strcmp(t->name, name)) return t; return NULL; @@ -203,6 +208,8 @@ static void recent_table_flush(struct recent_table *t) static bool recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) { + struct net *net = dev_net(par->in ? par->in : par->out); + struct recent_net *recent_net = recent_pernet(net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; struct recent_entry *e; @@ -235,7 +242,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) ttl++; spin_lock_bh(&recent_lock); - t = recent_table_lookup(info->name); + t = recent_table_lookup(recent_net, info->name); e = recent_entry_lookup(t, &addr, par->match->family, (info->check_set & XT_RECENT_TTL) ? 
ttl : 0); if (e == NULL) { @@ -279,6 +286,7 @@ out: static bool recent_mt_check(const struct xt_mtchk_param *par) { + struct recent_net *recent_net = recent_pernet(par->net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; #ifdef CONFIG_PROC_FS @@ -287,6 +295,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) unsigned i; bool ret = false; + if (unlikely(!hash_rnd_inited)) { + get_random_bytes(&hash_rnd, sizeof(hash_rnd)); + hash_rnd_inited = true; + } if (hweight8(info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE | XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) @@ -294,14 +306,18 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && (info->seconds || info->hit_count)) return false; - if (info->hit_count > ip_pkt_list_tot) + if (info->hit_count > ip_pkt_list_tot) { + pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than " + "packets to be remembered (%u)\n", + info->hit_count, ip_pkt_list_tot); return false; + } if (info->name[0] == '\0' || strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) return false; mutex_lock(&recent_mutex); - t = recent_table_lookup(info->name); + t = recent_table_lookup(recent_net, info->name); if (t != NULL) { t->refcnt++; ret = true; @@ -318,7 +334,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) for (i = 0; i < ip_list_hash_size; i++) INIT_LIST_HEAD(&t->iphash[i]); #ifdef CONFIG_PROC_FS - pde = proc_create_data(t->name, ip_list_perms, recent_proc_dir, + pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, &recent_mt_fops, t); if (pde == NULL) { kfree(t); @@ -327,10 +343,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) pde->uid = ip_list_uid; pde->gid = ip_list_gid; #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT - pde = proc_create_data(t->name, ip_list_perms, proc_old_dir, + pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent, &recent_old_fops, t); if (pde == NULL) { - remove_proc_entry(t->name, proc_old_dir); + remove_proc_entry(t->name, recent_net->xt_recent); kfree(t); goto out; } @@ -339,7 +355,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par) #endif #endif spin_lock_bh(&recent_lock); - list_add_tail(&t->list, &tables); + list_add_tail(&t->list, &recent_net->tables); spin_unlock_bh(&recent_lock); ret = true; out: @@ -349,20 +365,21 @@ out: static void recent_mt_destroy(const struct xt_mtdtor_param *par) { + struct recent_net *recent_net = recent_pernet(par->net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; mutex_lock(&recent_mutex); - t = recent_table_lookup(info->name); + t = recent_table_lookup(recent_net, info->name); if (--t->refcnt == 0) { spin_lock_bh(&recent_lock); list_del(&t->list); spin_unlock_bh(&recent_lock); #ifdef CONFIG_PROC_FS #ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT - remove_proc_entry(t->name, proc_old_dir); + remove_proc_entry(t->name, recent_net->ipt_recent); #endif - remove_proc_entry(t->name, recent_proc_dir); + remove_proc_entry(t->name, recent_net->xt_recent); #endif recent_table_flush(t); kfree(t); @@ -611,8 +628,65 @@ static const struct file_operations recent_mt_fops = { .release = seq_release_private, .owner = THIS_MODULE, }; + +static int __net_init recent_proc_net_init(struct net *net) +{ + struct recent_net *recent_net = recent_pernet(net); + + recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net); + if (!recent_net->xt_recent) + return -ENOMEM; +#ifdef 
CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT + recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net); + if (!recent_net->ipt_recent) { + proc_net_remove(net, "xt_recent"); + return -ENOMEM; + } +#endif + return 0; +} + +static void __net_exit recent_proc_net_exit(struct net *net) +{ +#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT + proc_net_remove(net, "ipt_recent"); +#endif + proc_net_remove(net, "xt_recent"); +} +#else +static inline int recent_proc_net_init(struct net *net) +{ + return 0; +} + +static inline void recent_proc_net_exit(struct net *net) +{ +} #endif /* CONFIG_PROC_FS */ +static int __net_init recent_net_init(struct net *net) +{ + struct recent_net *recent_net = recent_pernet(net); + + INIT_LIST_HEAD(&recent_net->tables); + return recent_proc_net_init(net); +} + +static void __net_exit recent_net_exit(struct net *net) +{ + struct recent_net *recent_net = recent_pernet(net); + + BUG_ON(!list_empty(&recent_net->tables)); + recent_proc_net_exit(net); +} + +static struct pernet_operations recent_net_ops = { + .init = recent_net_init, + .exit = recent_net_exit, + .id = &recent_net_id, + .size = sizeof(struct recent_net), +}; + static struct xt_match recent_mt_reg[] __read_mostly = { { .name = "recent", @@ -644,39 +718,19 @@ static int __init recent_mt_init(void) return -EINVAL; ip_list_hash_size = 1 << fls(ip_list_tot); - err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); -#ifdef CONFIG_PROC_FS + err = register_pernet_subsys(&recent_net_ops); if (err) return err; - recent_proc_dir = proc_mkdir("xt_recent", init_net.proc_net); - if (recent_proc_dir == NULL) { - xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); - err = -ENOMEM; - } -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT - if (err < 0) - return err; - proc_old_dir = proc_mkdir("ipt_recent", init_net.proc_net); - if (proc_old_dir == NULL) { - remove_proc_entry("xt_recent", init_net.proc_net); - xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); - err = -ENOMEM; - } -#endif -#endif + err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); + if (err) + unregister_pernet_subsys(&recent_net_ops); return err; } static void __exit recent_mt_exit(void) { - BUG_ON(!list_empty(&tables)); xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); -#ifdef CONFIG_PROC_FS -#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT - remove_proc_entry("ipt_recent", init_net.proc_net); -#endif - remove_proc_entry("xt_recent", init_net.proc_net); -#endif + unregister_pernet_subsys(&recent_net_ops); } module_init(recent_mt_init); diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h new file mode 100644 index 000000000000..6efe4e5a81c6 --- /dev/null +++ b/net/netfilter/xt_repldata.h @@ -0,0 +1,35 @@ +/* + * Today's hack: quantum tunneling in structs + * + * 'entries' and 'term' are never anywhere referenced by word in code. In fact, + * they serve as the hanging-off data accessed through repl.data[]. 
+ */ + +#define xt_alloc_initial_table(type, typ2) ({ \ + unsigned int hook_mask = info->valid_hooks; \ + unsigned int nhooks = hweight32(hook_mask); \ + unsigned int bytes = 0, hooknum = 0, i = 0; \ + struct { \ + struct type##_replace repl; \ + struct type##_standard entries[nhooks]; \ + struct type##_error term; \ + } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \ + if (tbl == NULL) \ + return NULL; \ + strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \ + tbl->term = (struct type##_error)typ2##_ERROR_INIT; \ + tbl->repl.valid_hooks = hook_mask; \ + tbl->repl.num_entries = nhooks + 1; \ + tbl->repl.size = nhooks * sizeof(struct type##_standard) + \ + sizeof(struct type##_error); \ + for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \ + if (!(hook_mask & 1)) \ + continue; \ + tbl->repl.hook_entry[hooknum] = bytes; \ + tbl->repl.underflow[hooknum] = bytes; \ + tbl->entries[i++] = (struct type##_standard) \ + typ2##_STANDARD_INIT(NF_ACCEPT); \ + bytes += sizeof(struct type##_standard); \ + } \ + tbl; \ })
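
The xt_hook_link() function added above and the xt_alloc_initial_table() macro in xt_repldata.h share one idiom: count the set bits of valid_hooks with hweight32(), allocate that many slots, then shift the mask bit by bit so each slot records its hook number. A minimal userspace sketch of that mask walk follows; the struct and all names are illustrative, __builtin_popcount() stands in for hweight32(), and calloc() stands in for the kernel allocators.

/*
 * Sketch of the hook-mask walk used by xt_hook_link() and
 * xt_alloc_initial_table(): allocate one slot per set bit, then shift
 * the mask and record the bit index of each set bit in turn.
 */
#include <stdio.h>
#include <stdlib.h>

struct hook_slot {
	unsigned int hooknum;	/* bit index within the original mask */
};

static struct hook_slot *build_slots(unsigned int hook_mask,
				     unsigned int *num_slots)
{
	unsigned int n = (unsigned int)__builtin_popcount(hook_mask);
	unsigned int i = 0, hooknum = 0;
	struct hook_slot *slots = calloc(n, sizeof(*slots));

	if (slots == NULL)
		return NULL;
	for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		slots[i++].hooknum = hooknum;
	}
	*num_slots = n;
	return slots;
}

int main(void)
{
	unsigned int n, i;
	struct hook_slot *slots = build_slots(0x16, &n);	/* bits 1, 2, 4 set */

	if (slots == NULL)
		return 1;
	for (i = 0; i < n; i++)
		printf("slot %u -> hook %u\n", i, slots[i].hooknum);
	free(slots);
	return 0;
}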
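
Several hunks above (xt_NFQUEUE, xt_RATEEST, xt_connlimit, xt_recent) move get_random_bytes() out of module init and into the checkentry path behind an rnd_inited flag, so the hash seed is drawn only once a rule actually uses the extension. The sketch below shows that deferred one-time seeding shape under the assumption that callers are serialized, as checkentry calls are in x_tables; getrandom() is a userspace stand-in for get_random_bytes(), and example_checkentry() is a made-up name.

/*
 * Deferred one-time seeding: the random value is drawn the first time
 * a rule is checked rather than at module load.  Callers are assumed
 * to be serialized, so a plain bool guard is enough here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <sys/random.h>

static uint32_t jhash_initval;		/* hash seed, set on first use */
static bool rnd_inited;			/* has the seed been drawn yet? */

static bool example_checkentry(void)
{
	if (!rnd_inited) {
		if (getrandom(&jhash_initval, sizeof(jhash_initval), 0) !=
		    sizeof(jhash_initval))
			return false;
		rnd_inited = true;
	}
	/* ... the rest of the per-rule validation would go here ... */
	return true;
}

int main(void)
{
	if (example_checkentry())
		printf("seed = 0x%08" PRIx32 "\n", jhash_initval);
	return 0;
}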
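
The xt_repldata.h comment about "quantum tunneling in structs" is terse: entries[] and term are declared directly after the replace header inside a single allocation, so the same bytes are reachable through the header's trailing data array without storing any extra pointer. Below is a userspace sketch of that layout under invented names (fake_replace, fake_entry); the zero-length data[0] member mirrors the kernel's trailing-array idiom, which is a GNU C extension.

/*
 * One allocation holds a header followed immediately by its payload,
 * so walking the bytes after the header reaches the payload "through"
 * the header.  Illustrative only, not the real ipt/ip6t structures.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_replace {			/* stands in for a *_replace header */
	unsigned int num_entries;
	unsigned int size;
	unsigned char data[0];		/* trailing entries live here (GNU ext.) */
};

struct fake_entry {			/* stands in for a *_standard entry */
	unsigned int verdict;
};

int main(void)
{
	enum { NENTRIES = 3 };
	struct {
		struct fake_replace repl;
		struct fake_entry entries[NENTRIES];
	} *tbl = calloc(1, sizeof(*tbl));
	unsigned int i;

	if (tbl == NULL)
		return 1;
	tbl->repl.num_entries = NENTRIES;
	tbl->repl.size = sizeof(tbl->entries);
	for (i = 0; i < NENTRIES; i++)
		tbl->entries[i].verdict = i;

	/* The entries are reachable via repl.data[], no extra pointer kept. */
	for (i = 0; i < NENTRIES; i++) {
		const struct fake_entry *e =
			(const void *)(tbl->repl.data + i * sizeof(*e));
		printf("entry %u verdict %u\n", i, e->verdict);
	}
	free(tbl);
	return 0;
}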