This is the same as with the timeout variable: the high_thresh and
low_thresh fragment-cache limits move from the global inet_frags_ctl
into the per-namespace netns_frags. Currently, after the high threshold
is exceeded _all_ the fragments are evicted, but this will be fixed in
a later patch.
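
For illustration only, here is a small stand-alone sketch (not kernel
code) of the threshold behaviour this patch makes per-namespace: once a
namespace's fragment memory rises above its high_thresh, the evictor
prunes queues until usage drops back to low_thresh. The *_sim names and
evict_one_queue() below are hypothetical; in the kernel the fields live
in struct netns_frags and the pruning loop is the evictor patched below
(invoked via ip_evictor()/nf_ct_frag6_evictor()).

	#include <stdio.h>

	struct netns_frags_sim {
		int mem;          /* memory charged to this namespace's fragments */
		int high_thresh;  /* start evicting above this */
		int low_thresh;   /* prune back down to (roughly) this */
	};

	/* Pretend to free one queued fragment chain; return bytes released.
	 * (Hypothetical stand-in for dropping an inet_frag_queue.) */
	static int evict_one_queue(struct netns_frags_sim *nf)
	{
		int freed = 4096;	/* hypothetical per-queue size */
		nf->mem -= freed;
		return freed;
	}

	static void frag_evictor(struct netns_frags_sim *nf)
	{
		/* Same shape as the evictor hunk below: the "work" to do is
		 * how far we sit above this namespace's own low_thresh. */
		int work = nf->mem - nf->low_thresh;

		while (work > 0 && nf->mem > 0)
			work -= evict_one_queue(nf);
	}

	int main(void)
	{
		struct netns_frags_sim nf = {
			.mem         = 300 * 1024,
			.high_thresh = 256 * 1024,	/* defaults set by this patch */
			.low_thresh  = 192 * 1024,
		};

		/* Same shape as the defrag-path hunks below: eviction only
		 * runs once mem exceeds the per-namespace high_thresh. */
		if (nf.mem > nf.high_thresh)
			frag_evictor(&nf);

		printf("mem after eviction: %dK\n", nf.mem / 1024);	/* 192K */
		return 0;
	}

The point of moving the fields is visible in the sketch: each namespace
carries its own mem/high_thresh/low_thresh triple, so one namespace can
tune or exhaust its fragment budget without affecting the others, where
previously a single global limit in ip4_frags_ctl/nf_frags_ctl was
shared by all.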
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
/* sysctls */
int timeout;
+ int high_thresh;
+ int low_thresh;
};
struct inet_frag_queue {
#define INETFRAGS_HASHSZ 64
struct inet_frags_ctl {
- int high_thresh;
- int low_thresh;
int secret_interval;
};
struct inet_frag_queue *q;
int work, evicted = 0;
- work = atomic_read(&nf->mem) - f->ctl->low_thresh;
+ work = atomic_read(&nf->mem) - nf->low_thresh;
while (work > 0) {
read_lock(&f->lock);
if (list_empty(&f->lru_list)) {
};
static struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
- /*
- * Fragment cache limits. We will commit 256K at one time. Should we
- * cross that limit we will prune down to 192K. This should cope with
- * even the most extreme cases without allowing an attacker to
- * measurably harm machine performance.
- */
- .high_thresh = 256 * 1024,
- .low_thresh = 192 * 1024,
.secret_interval = 10 * 60 * HZ,
};
net = skb->dev->nd_net;
/* Start by cleaning up the memory. */
- if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
+ if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
ip_evictor(net);
/* Lookup (or create) queue header */
{
.ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH,
.procname = "ipfrag_high_thresh",
- .data = &ip4_frags_ctl.high_thresh,
+ .data = &init_net.ipv4.frags.high_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
{
.ctl_name = NET_IPV4_IPFRAG_LOW_THRESH,
.procname = "ipfrag_low_thresh",
- .data = &ip4_frags_ctl.low_thresh,
+ .data = &init_net.ipv4.frags.low_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
if (table == NULL)
goto err_alloc;
- table[0].mode &= ~0222;
- table[1].mode &= ~0222;
+ table[0].data = &net->ipv4.frags.high_thresh;
+ table[1].data = &net->ipv4.frags.low_thresh;
table[2].data = &net->ipv4.frags.timeout;
table[3].mode &= ~0222;
table[4].mode &= ~0222;
static int ipv4_frags_init_net(struct net *net)
{
+ /*
+ * Fragment cache limits. We will commit 256K at one time. Should we
+ * cross that limit we will prune down to 192K. This should cope with
+ * even the most extreme cases without allowing an attacker to
+ * measurably harm machine performance.
+ */
+ net->ipv4.frags.high_thresh = 256 * 1024;
+ net->ipv4.frags.low_thresh = 192 * 1024;
/*
* Important NOTE! Fragment queue must be destroyed before MSL expires.
* RFC791 is wrong proposing to prolongate timer each fragment arrival
};
static struct inet_frags_ctl nf_frags_ctl __read_mostly = {
- .high_thresh = 256 * 1024,
- .low_thresh = 192 * 1024,
.secret_interval = 10 * 60 * HZ,
};
{
.ctl_name = NET_NF_CONNTRACK_FRAG6_LOW_THRESH,
.procname = "nf_conntrack_frag6_low_thresh",
- .data = &nf_frags_ctl.low_thresh,
+ .data = &nf_init_frags.low_thresh,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = &proc_dointvec,
{
.ctl_name = NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,
.procname = "nf_conntrack_frag6_high_thresh",
- .data = &nf_frags_ctl.high_thresh,
+ .data = &nf_init_frags.high_thresh,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = &proc_dointvec,
goto ret_orig;
}
- if (atomic_read(&nf_init_frags.mem) > nf_frags_ctl.high_thresh)
+ if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
nf_frags.match = ip6_frag_match;
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
+ nf_init_frags.high_thresh = 256 * 1024;
+ nf_init_frags.low_thresh = 192 * 1024;
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);
{
inet_frags_fini(&nf_frags);
- nf_frags_ctl.low_thresh = 0;
+ nf_init_frags.low_thresh = 0;
nf_ct_frag6_evictor();
}
}
net = skb->dev->nd_net;
- if (atomic_read(&net->ipv6.frags.mem) >
- init_net.ipv6.sysctl.frags.high_thresh)
+ if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
ip6_evictor(net, ip6_dst_idev(skb->dst));
if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
{
.ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH,
.procname = "ip6frag_high_thresh",
- .data = &init_net.ipv6.sysctl.frags.high_thresh,
+ .data = &init_net.ipv6.frags.high_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
{
.ctl_name = NET_IPV6_IP6FRAG_LOW_THRESH,
.procname = "ip6frag_low_thresh",
- .data = &init_net.ipv6.sysctl.frags.low_thresh,
+ .data = &init_net.ipv6.frags.low_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
if (table == NULL)
goto err_alloc;
- table[0].mode &= ~0222;
- table[1].mode &= ~0222;
+ table[0].data = &net->ipv6.frags.high_thresh;
+ table[1].data = &net->ipv6.frags.low_thresh;
table[2].data = &net->ipv6.frags.timeout;
table[3].mode &= ~0222;
}
{
ip6_frags.ctl = &net->ipv6.sysctl.frags;
- net->ipv6.sysctl.frags.high_thresh = 256 * 1024;
- net->ipv6.sysctl.frags.low_thresh = 192 * 1024;
+ net->ipv6.frags.high_thresh = 256 * 1024;
+ net->ipv6.frags.low_thresh = 192 * 1024;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
net->ipv6.sysctl.frags.secret_interval = 10 * 60 * HZ;