author     Pablo Neira Ayuso <pablo@netfilter.org>   2009-06-13 12:26:29 +0200
committer  Patrick McHardy <kaber@trash.net>          2009-06-13 12:26:29 +0200
commit     a0891aa6a635f658f29bb061a00d6d3486941519 (patch)
tree       40df3898f9f2e0892683c482d81deec4fd5a9257 /net
parent     65cb9fda32be613216f601a330b311c3bd7a8436 (diff)
netfilter: conntrack: move event caching to conntrack extension infrastructure
This patch reworks the per-cpu event caching to use the conntrack
extension infrastructure. The main drawback is that we consume more
memory per conntrack if event delivery is enabled. This patch is
required by the reliable event delivery that follows this one.

BTW, this patch allows you to enable/disable event delivery via
/proc/sys/net/netfilter/nf_conntrack_events at runtime, although you
can still disable event caching as a compilation option.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
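[Editor's note] For illustration, a minimal userspace C sketch of the caching
pattern this patch introduces: pending events accumulate as bits in a mask
attached to each conntrack (the new ecache extension), and delivery atomically
swaps the mask to zero, mirroring the xchg(&e->cache, 0) call in
nf_ct_deliver_cached_events in the diff below. All names and types here are
simplified stand-ins, not the kernel API.

/* Userspace model of a per-object event cache: events accumulate as
 * bits in an atomic mask and are flushed with an atomic exchange, so
 * no cached event is lost or delivered twice.  Illustrative only. */
#include <stdio.h>
#include <stdatomic.h>

enum { EVT_NEW, EVT_PROTOINFO };        /* event bit positions */

struct conn {
    atomic_ulong event_cache;           /* stand-in for nf_conntrack_ecache */
};

static void cache_event(struct conn *c, int event)
{
    atomic_fetch_or(&c->event_cache, 1UL << event);
}

static void deliver_cached_events(struct conn *c)
{
    /* Atomic fetch-and-clear: a concurrent cache_event() either lands
     * before the swap (delivered now) or after (delivered next time). */
    unsigned long events = atomic_exchange(&c->event_cache, 0);

    if (events & (1UL << EVT_NEW))
        printf("deliver: NEW\n");
    if (events & (1UL << EVT_PROTOINFO))
        printf("deliver: PROTOINFO\n");
}

int main(void)
{
    struct conn c = { .event_cache = 0 };

    cache_event(&c, EVT_NEW);
    cache_event(&c, EVT_PROTOINFO);
    deliver_cached_events(&c);  /* prints both events */
    deliver_cached_events(&c);  /* cache already empty: prints nothing */
    return 0;
}

The atomic fetch-and-clear keeps the caching hot path lock-free while
ensuring each cached event is delivered at most once.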
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nf_conntrack_core.c     |  15
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c   | 185
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  |  49
3 files changed, 150 insertions(+), 99 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d8dffe7ab509..bcacbb5373c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -39,6 +39,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -577,6 +578,7 @@ init_conntrack(struct net *net,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
spin_lock_bh(&nf_conntrack_lock);
exp = nf_ct_find_expectation(net, tuple);
@@ -1031,8 +1033,6 @@ static void nf_conntrack_cleanup_init_net(void)
static void nf_conntrack_cleanup_net(struct net *net)
{
- nf_ct_event_cache_flush(net);
- nf_conntrack_ecache_fini(net);
i_see_dead_people:
nf_ct_iterate_cleanup(net, kill_all, NULL);
if (atomic_read(&net->ct.count) != 0) {
@@ -1045,6 +1045,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
nf_conntrack_htable_size);
+ nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
free_percpu(net->ct.stat);
@@ -1220,9 +1221,6 @@ static int nf_conntrack_init_net(struct net *net)
ret = -ENOMEM;
goto err_stat;
}
- ret = nf_conntrack_ecache_init(net);
- if (ret < 0)
- goto err_ecache;
net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
&net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
@@ -1236,6 +1234,9 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_acct_init(net);
if (ret < 0)
goto err_acct;
+ ret = nf_conntrack_ecache_init(net);
+ if (ret < 0)
+ goto err_ecache;
/* Set up fake conntrack:
- to never be deleted, not in any hashes */
@@ -1248,14 +1249,14 @@ static int nf_conntrack_init_net(struct net *net)
return 0;
+err_ecache:
+ nf_conntrack_acct_fini(net);
err_acct:
nf_conntrack_expect_fini(net);
err_expect:
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
nf_conntrack_htable_size);
err_hash:
- nf_conntrack_ecache_fini(net);
-err_ecache:
free_percpu(net->ct.stat);
err_stat:
return ret;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5516b3e64b43..683281b78047 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -21,6 +21,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);
@@ -32,94 +33,38 @@ EXPORT_SYMBOL_GPL(nf_expect_event_cb);
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
-static inline void
-__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
+void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
+ unsigned long events;
struct nf_ct_event_notifier *notify;
+ struct nf_conntrack_ecache *e;
rcu_read_lock();
notify = rcu_dereference(nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
- if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
- && ecache->events) {
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ goto out_unlock;
+
+ events = xchg(&e->cache, 0);
+
+ if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
struct nf_ct_event item = {
- .ct = ecache->ct,
+ .ct = ct,
.pid = 0,
.report = 0
};
- notify->fcn(ecache->events, &item);
+ notify->fcn(events, &item);
}
- ecache->events = 0;
- nf_ct_put(ecache->ct);
- ecache->ct = NULL;
-
out_unlock:
rcu_read_unlock();
}
-
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling for freeing the skb */
-void nf_ct_deliver_cached_events(const struct nf_conn *ct)
-{
- struct net *net = nf_ct_net(ct);
- struct nf_conntrack_ecache *ecache;
-
- local_bh_disable();
- ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
- if (ecache->ct == ct)
- __nf_ct_deliver_cached_events(ecache);
- local_bh_enable();
-}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-/* Deliver cached events for old pending events, if current conntrack != old */
-void __nf_ct_event_cache_init(struct nf_conn *ct)
-{
- struct net *net = nf_ct_net(ct);
- struct nf_conntrack_ecache *ecache;
-
- /* take care of delivering potentially old events */
- ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
- BUG_ON(ecache->ct == ct);
- if (ecache->ct)
- __nf_ct_deliver_cached_events(ecache);
- /* initialize for this conntrack/packet */
- ecache->ct = ct;
- nf_conntrack_get(&ct->ct_general);
-}
-EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
-
-/* flush the event cache - touches other CPU's data and must not be called
- * while packets are still passing through the code */
-void nf_ct_event_cache_flush(struct net *net)
-{
- struct nf_conntrack_ecache *ecache;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- ecache = per_cpu_ptr(net->ct.ecache, cpu);
- if (ecache->ct)
- nf_ct_put(ecache->ct);
- }
-}
-
-int nf_conntrack_ecache_init(struct net *net)
-{
- net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
- if (!net->ct.ecache)
- return -ENOMEM;
- return 0;
-}
-
-void nf_conntrack_ecache_fini(struct net *net)
-{
- free_percpu(net->ct.ecache);
-}
-
int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
{
int ret = 0;
@@ -185,3 +130,107 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
+
+#define NF_CT_EVENTS_DEFAULT 1
+static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table event_sysctl_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nf_conntrack_events",
+ .data = &init_net.ct.sysctl_events,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type event_extend __read_mostly = {
+ .len = sizeof(struct nf_conntrack_ecache),
+ .align = __alignof__(struct nf_conntrack_ecache),
+ .id = NF_CT_EXT_ECACHE,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_event_init_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ table[0].data = &net->ct.sysctl_events;
+
+ net->ct.event_sysctl_header =
+ register_net_sysctl_table(net,
+ nf_net_netfilter_sysctl_path, table);
+ if (!net->ct.event_sysctl_header) {
+ printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
+ goto out_register;
+ }
+ return 0;
+
+out_register:
+ kfree(table);
+out:
+ return -ENOMEM;
+}
+
+static void nf_conntrack_event_fini_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ct.event_sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->ct.event_sysctl_header);
+ kfree(table);
+}
+#else
+static int nf_conntrack_event_init_sysctl(struct net *net)
+{
+ return 0;
+}
+
+static void nf_conntrack_event_fini_sysctl(struct net *net)
+{
+}
+#endif /* CONFIG_SYSCTL */
+
+int nf_conntrack_ecache_init(struct net *net)
+{
+ int ret;
+
+ net->ct.sysctl_events = nf_ct_events;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&event_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_event: Unable to register "
+ "event extension.\n");
+ goto out_extend_register;
+ }
+ }
+
+ ret = nf_conntrack_event_init_sysctl(net);
+ if (ret < 0)
+ goto out_sysctl;
+
+ return 0;
+
+out_sysctl:
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&event_extend);
+out_extend_register:
+ return ret;
+}
+
+void nf_conntrack_ecache_fini(struct net *net)
+{
+ nf_conntrack_event_fini_sysctl(net);
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&event_extend);
+}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4e503ada5728..19706eff1647 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -468,10 +468,10 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (ct == &nf_conntrack_untracked)
return 0;
- if (events & IPCT_DESTROY) {
+ if (events & (1 << IPCT_DESTROY)) {
type = IPCTNL_MSG_CT_DELETE;
group = NFNLGRP_CONNTRACK_DESTROY;
- } else if (events & (IPCT_NEW | IPCT_RELATED)) {
+ } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
type = IPCTNL_MSG_CT_NEW;
flags = NLM_F_CREATE|NLM_F_EXCL;
group = NFNLGRP_CONNTRACK_NEW;
@@ -519,7 +519,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (ctnetlink_dump_status(skb, ct) < 0)
goto nla_put_failure;
- if (events & IPCT_DESTROY) {
+ if (events & (1 << IPCT_DESTROY)) {
if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
goto nla_put_failure;
@@ -527,31 +527,31 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (ctnetlink_dump_timeout(skb, ct) < 0)
goto nla_put_failure;
- if (events & IPCT_PROTOINFO
+ if (events & (1 << IPCT_PROTOINFO)
&& ctnetlink_dump_protoinfo(skb, ct) < 0)
goto nla_put_failure;
- if ((events & IPCT_HELPER || nfct_help(ct))
+ if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
&& ctnetlink_dump_helpinfo(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_SECMARK
- if ((events & IPCT_SECMARK || ct->secmark)
+ if ((events & (1 << IPCT_SECMARK) || ct->secmark)
&& ctnetlink_dump_secmark(skb, ct) < 0)
goto nla_put_failure;
#endif
- if (events & IPCT_RELATED &&
+ if (events & (1 << IPCT_RELATED) &&
ctnetlink_dump_master(skb, ct) < 0)
goto nla_put_failure;
- if (events & IPCT_NATSEQADJ &&
+ if (events & (1 << IPCT_NATSEQADJ) &&
ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
goto nla_put_failure;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
- if ((events & IPCT_MARK || ct->mark)
+ if ((events & (1 << IPCT_MARK) || ct->mark)
&& ctnetlink_dump_mark(skb, ct) < 0)
goto nla_put_failure;
#endif
@@ -1253,6 +1253,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
@@ -1340,13 +1341,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
else
events = IPCT_NEW;
- nf_conntrack_event_report(IPCT_STATUS |
- IPCT_HELPER |
- IPCT_PROTOINFO |
- IPCT_NATSEQADJ |
- IPCT_MARK | events,
- ct, NETLINK_CB(skb).pid,
- nlmsg_report(nlh));
+ nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
+ (1 << IPCT_HELPER) |
+ (1 << IPCT_PROTOINFO) |
+ (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_MARK) | events,
+ ct, NETLINK_CB(skb).pid,
+ nlmsg_report(nlh));
nf_ct_put(ct);
} else
spin_unlock_bh(&nf_conntrack_lock);
@@ -1365,13 +1366,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err == 0) {
nf_conntrack_get(&ct->ct_general);
spin_unlock_bh(&nf_conntrack_lock);
- nf_conntrack_event_report(IPCT_STATUS |
- IPCT_HELPER |
- IPCT_PROTOINFO |
- IPCT_NATSEQADJ |
- IPCT_MARK,
- ct, NETLINK_CB(skb).pid,
- nlmsg_report(nlh));
+ nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
+ (1 << IPCT_HELPER) |
+ (1 << IPCT_PROTOINFO) |
+ (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_MARK),
+ ct, NETLINK_CB(skb).pid,
+ nlmsg_report(nlh));
nf_ct_put(ct);
} else
spin_unlock_bh(&nf_conntrack_lock);
@@ -1515,7 +1516,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
unsigned int type;
int flags = 0;
- if (events & IPEXP_NEW) {
+ if (events & (1 << IPEXP_NEW)) {
type = IPCTNL_MSG_EXP_NEW;
flags = NLM_F_CREATE|NLM_F_EXCL;
} else
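[Editor's note] Side note on the ctnetlink hunks above: IPCT_* and IPEXP_*
identifiers are now bit positions rather than pre-shifted flag masks, so every
test changes from events & IPCT_FOO to events & (1 << IPCT_FOO). A tiny
standalone C check of that convention (the enum values below are illustrative,
not the kernel's):

/* Old-style test (events & IPCT_DESTROY) would probe the wrong bits
 * once IPCT_* become bit positions; the shifted form tests the
 * intended bit.  Enum values are illustrative. */
#include <assert.h>

enum { IPCT_NEW = 0, IPCT_DESTROY = 9 };    /* bit positions */

int main(void)
{
    unsigned long events = 1UL << IPCT_DESTROY;

    assert(events & (1UL << IPCT_DESTROY)); /* correct: bit 9 is set */
    assert(!(events & IPCT_DESTROY));       /* old style probes bits 0 and 3 */
    assert(!(events & (1UL << IPCT_NEW)));  /* bit 0 is not set */
    return 0;
}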