struct inet_peer *peer;
};
-static struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
- /*
- * Fragment cache limits. We will commit 256K at one time. Should we
- * cross that limit we will prune down to 192K. This should cope with
- * even the most extreme cases without allowing an attacker to
- * measurably harm machine performance.
- */
- .high_thresh = 256 * 1024,
- .low_thresh = 192 * 1024,
-
- /*
- * Important NOTE! Fragment queue must be destroyed before MSL expires.
- * RFC791 is wrong proposing to prolongate timer each fragment arrival
- * by TTL.
- */
- .timeout = IP_FRAG_TIME,
- .secret_interval = 10 * 60 * HZ,
-};
-
static struct inet_frags ip4_frags;
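The tunables that the deleted ip4_frags_ctl carried above now live in per-namespace state. For orientation, here is a rough sketch of the container that the accesses below assume (qp->q.net->timeout, net->ipv4.frags.high_thresh, and so on); the authoritative layout is struct netns_frags in include/net/inet_frag.h:

struct netns_frags {
	int			nqueues;	/* count returned by ip_frag_nqueues() */
	atomic_t		mem;		/* bytes committed to fragment queues */
	struct list_head	lru_list;	/* per-net LRU, oldest queue first */

	/* per-namespace tunables, wired to the sysctl entries below */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};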
int ip_frag_nqueues(struct net *net)
{
struct sk_buff *fp;
- if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
+ if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
atomic_inc(&qp->q.refcnt);
return -ETIMEDOUT;
}
return ip_frag_reasm(qp, prev, dev);
write_lock(&ip4_frags.lock);
- list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+ list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
write_unlock(&ip4_frags.lock);
return -EINPROGRESS;
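A note on the mod_timer() test above: mod_timer() returns 0 when the timer it updated was no longer pending, i.e. expiry is already underway and ip_expire() will tear the queue down. ip_frag_reinit() therefore refuses to reuse the queue and returns -ETIMEDOUT; the atomic_inc() takes an extra reference, which appears to balance the kill/put the caller performs on that error path.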
net = skb->dev->nd_net;
/* Start by cleaning up the memory. */
- if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
+ if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
ip_evictor(net);
/* Lookup (or create) queue header */
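The threshold test is now per-namespace as well. Conceptually, once committed fragment memory exceeds high_thresh, ip_evictor() kills whole queues from the head of the per-net LRU until usage drops back under low_thresh. A simplified sketch of that pruning loop, following the shape of the generic inet_frag_evictor() but not reproducing its exact body:

static void evict_to_low_thresh(struct netns_frags *nf, struct inet_frags *f)
{
	/* Hysteresis: triggered above high_thresh, stops at low_thresh,
	 * so one burst does not force a prune on every arriving fragment. */
	while (atomic_read(&nf->mem) > nf->low_thresh) {
		struct inet_frag_queue *q;

		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}
		/* the oldest queue sits at the head of the per-net LRU */
		q = list_entry(nf->lru_list.next,
			       struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & COMPLETE))
			inet_frag_kill(q, f);	/* unhash and stop the timer */
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, NULL);	/* gives back nf->mem */
	}
}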
{
.ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH,
.procname = "ipfrag_high_thresh",
- .data = &ip4_frags_ctl.high_thresh,
+ .data = &init_net.ipv4.frags.high_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
{
.ctl_name = NET_IPV4_IPFRAG_LOW_THRESH,
.procname = "ipfrag_low_thresh",
- .data = &ip4_frags_ctl.low_thresh,
+ .data = &init_net.ipv4.frags.low_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
{
.ctl_name = NET_IPV4_IPFRAG_TIME,
.procname = "ipfrag_time",
- .data = &ip4_frags_ctl.timeout,
+ .data = &init_net.ipv4.frags.timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
{
.ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL,
.procname = "ipfrag_secret_interval",
- .data = &ip4_frags_ctl.secret_interval,
+ .data = &ip4_frags.secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
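The per-namespace registration below patches the duplicated table by array index, so the entry order above is load-bearing. The assumed mapping (entry 4 is not visible in this excerpt; in trees of this vintage it would be ipfrag_max_dist, an assumption worth checking against the full table):

/* table[0]  ipfrag_high_thresh      -> &net->ipv4.frags.high_thresh
 * table[1]  ipfrag_low_thresh       -> &net->ipv4.frags.low_thresh
 * table[2]  ipfrag_time             -> &net->ipv4.frags.timeout
 * table[3]  ipfrag_secret_interval  -> global &ip4_frags.secret_interval
 * table[4]  ipfrag_max_dist (?)     -> global, left pointing at init data
 */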
static int ip4_frags_ctl_register(struct net *net)
{
+ struct ctl_table *table;
struct ctl_table_header *hdr;
- hdr = register_net_sysctl_table(net, net_ipv4_ctl_path,
- ip4_frags_ctl_table);
- return hdr == NULL ? -ENOMEM : 0;
+ table = ip4_frags_ctl_table;
+ /* init_net keeps the static table; other namespaces get a private copy */
+ if (net != &init_net) {
+ table = kmemdup(table, sizeof(ip4_frags_ctl_table), GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
+
+ table[0].data = &net->ipv4.frags.high_thresh;
+ table[1].data = &net->ipv4.frags.low_thresh;
+ table[2].data = &net->ipv4.frags.timeout;
+ /* the remaining entries point at global state; clear their write bits */
+ table[3].mode &= ~0222;
+ table[4].mode &= ~0222;
+ }
+
+ hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+ if (hdr == NULL)
+ goto err_reg;
+
+ net->ipv4.frags_hdr = hdr;
+ return 0;
+
+err_reg:
+ if (net != &init_net)
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void ip4_frags_ctl_unregister(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ipv4.frags_hdr->ctl_table_arg;
+ unregister_net_sysctl_table(net->ipv4.frags_hdr);
+ kfree(table);
}
#else
static inline int ip4_frags_ctl_register(struct net *net)
{
return 0;
}
+
+static inline void ip4_frags_ctl_unregister(struct net *net)
+{
+}
#endif
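Unregistration leans on register_net_sysctl_table() stashing the registered table pointer in hdr->ctl_table_arg, which is how the kmemdup()'d copy is recovered for kfree(). The unconditional kfree() never reaches the static ip4_frags_ctl_table in practice: a pernet subsystem's ->exit runs only when a namespace is destroyed, and init_net outlives built-in code.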
static int ipv4_frags_init_net(struct net *net)
{
+ /*
+ * Fragment cache limits. We will commit up to 256K at one time;
+ * should we cross that limit, we prune back down to 192K. This
+ * should cope with even the most extreme cases without allowing an
+ * attacker to measurably harm machine performance.
+ */
+ net->ipv4.frags.high_thresh = 256 * 1024;
+ net->ipv4.frags.low_thresh = 192 * 1024;
+ /*
+ * Important NOTE! The fragment queue must be destroyed before the
+ * MSL expires. RFC 791 is wrong in proposing to prolong the timer
+ * by the TTL on each fragment arrival.
+ */
+ net->ipv4.frags.timeout = IP_FRAG_TIME;
+
inet_frags_init_net(&net->ipv4.frags);
return ip4_frags_ctl_register(net);
}
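Because the defaults are assigned in the pernet ->init, every new namespace starts from the compiled-in values rather than inheriting the parent namespace's current sysctl settings. At these defaults a prune pass frees at least 256K - 192K = 64K before new fragments are admitted again.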
+static void ipv4_frags_exit_net(struct net *net)
+{
+ ip4_frags_ctl_unregister(net);
+ inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+}
+
+static struct pernet_operations ip4_frags_ops = {
+ .init = ipv4_frags_init_net,
+ .exit = ipv4_frags_exit_net,
+};
+
void __init ipfrag_init(void)
{
- ipv4_frags_init_net(&init_net);
- ip4_frags.ctl = &ip4_frags_ctl;
+ register_pernet_subsys(&ip4_frags_ops);
ip4_frags.hashfn = ip4_hashfn;
ip4_frags.constructor = ip4_frag_init;
ip4_frags.destructor = ip4_frag_free;
ip4_frags.qsize = sizeof(struct ipq);
ip4_frags.match = ip4_frag_match;
ip4_frags.frag_expire = ip_expire;
+ ip4_frags.secret_interval = 10 * 60 * HZ;
inet_frags_init(&ip4_frags);
}
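secret_interval moves the opposite way from the other tunables: the hash secret is shared across all namespaces, so the interval becomes a member of struct inet_frags, matching the sysctl entry that now points at &ip4_frags.secret_interval (and is read-only outside init_net). Presumably inet_frags_init() arms the rehash timer from it along these lines (a sketch; the real code lives in net/ipv4/inet_fragment.c):

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
		    (unsigned long)f);
	mod_timer(&f->secret_timer, jiffies + f->secret_interval);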