1 /* ip_nat_helper.c - generic support functions for NAT helpers
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
 */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
20 #include <linux/netfilter_ipv4.h>
21 #include <net/netfilter/nf_conntrack.h>
22 #include <net/netfilter/nf_conntrack_helper.h>
23 #include <net/netfilter/nf_conntrack_expect.h>
24 #include <net/netfilter/nf_nat.h>
25 #include <net/netfilter/nf_nat_protocol.h>
26 #include <net/netfilter/nf_nat_core.h>
27 #include <net/netfilter/nf_nat_helper.h>
/* Debug instrumentation, compiled out by default.  Flip the "#if 0" to
 * enable verbose tracing of sequence-offset bookkeeping via printk.
 * Without the conditional, DUMP_OFFSET would be macro-redefined with a
 * different replacement list, which is a hard compile diagnostic. */
#if 0
#define DEBUGP printk
#define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
#else
#define DEBUGP(format, args...)
#define DUMP_OFFSET(x)
#endif
/* Serializes updates to the per-conntrack nf_nat_seq offset records
 * modified under the lock below. */
37 static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
39 /* Setup TCP sequence correction given this change at this sequence */
/*
 * Records a sequence-number offset for one direction of a NATed TCP
 * connection after a helper grew/shrank the payload, so later packets
 * can have seq/ack rewritten (see nf_nat_seq_adjust below).
 *
 * NOTE(review): this region appears truncated -- the storage-class /
 * return-type line, the "sizediff" and "ct" parameters, the opening and
 * closing braces, and the "}" of the if-body are not visible.  Confirm
 * against upstream nf_nat_helper.c; comments describe only the visible
 * statements.
 */
41 adjust_tcp_sequence(u32 seq,
44 enum ip_conntrack_info ctinfo)
47 struct nf_nat_seq *this_way, *other_way;
48 struct nf_conn_nat *nat = nfct_nat(ct);
50 DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n",
51 (*skb)->len, new_size);
/* Direction of this packet selects which of the two per-connection
 * offset records we update; the reverse record is used when adjusting
 * ack numbers of replies. */
53 dir = CTINFO2DIR(ctinfo);
55 this_way = &nat->info.seq[dir];
56 other_way = &nat->info.seq[!dir];
58 DEBUGP("nf_nat_resize_packet: Seq_offset before: ");
59 DUMP_OFFSET(this_way);
61 spin_lock_bh(&nf_nat_seqofs_lock);
63 /* SYN adjust. If it's uninitialized, or this is after last
64 * correction, record it: we don't handle more than one
65 * adjustment in the window, but do deal with common case of a
/* Roll the previous "after" offset into "before" and accumulate the new
 * size difference; correction_pos marks the seq at which the new offset
 * starts applying. */
67 if (this_way->offset_before == this_way->offset_after ||
68 before(this_way->correction_pos, seq)) {
69 this_way->correction_pos = seq;
70 this_way->offset_before = this_way->offset_after;
71 this_way->offset_after += sizediff;
73 spin_unlock_bh(&nf_nat_seqofs_lock);
75 DEBUGP("nf_nat_resize_packet: Seq_offset after: ");
76 DUMP_OFFSET(this_way);
79 /* Frobs data inside this packet, which is linear. */
/*
 * Replaces match_len bytes at match_offset (relative to dataoff past the
 * network header) with rep_len bytes from rep_buffer, shifting any
 * trailing payload, resizing the skb, and refreshing the IP header's
 * tot_len and checksum.  Caller must have made the skb writable/linear.
 *
 * NOTE(review): truncated -- the "dataoff" and "rep_len" parameters, the
 * "unsigned char *data" declaration, the "} else {" line and closing
 * braces are not visible here; verify against upstream.
 */
80 static void mangle_contents(struct sk_buff *skb,
82 unsigned int match_offset,
83 unsigned int match_len,
84 const char *rep_buffer,
/* Linearity is a hard precondition: we do raw pointer arithmetic on the
 * payload below. */
89 BUG_ON(skb_is_nonlinear(skb));
90 data = skb_network_header(skb) + dataoff;
92 /* move post-replacement */
/* memmove (not memcpy): source and destination overlap when the
 * replacement length differs from the matched length. */
93 memmove(data + match_offset + rep_len,
94 data + match_offset + match_len,
95 skb->tail - (skb->network_header + dataoff +
96 match_offset + match_len));
98 /* insert data from buffer */
99 memcpy(data + match_offset, rep_buffer, rep_len);
101 /* update skb info */
/* Grow or shrink skb->len to account for the size delta. */
102 if (rep_len > match_len) {
103 DEBUGP("nf_nat_mangle_packet: Extending packet by "
104 "%u from %u bytes\n", rep_len - match_len,
106 skb_put(skb, rep_len - match_len);
108 DEBUGP("nf_nat_mangle_packet: Shrinking packet from "
109 "%u from %u bytes\n", match_len - rep_len,
111 __skb_trim(skb, skb->len + rep_len - match_len);
114 /* fix IP hdr checksum information */
/* Total length changed, so the IP header checksum must be recomputed. */
115 ip_hdr(skb)->tot_len = htons(skb->len);
116 ip_send_check(ip_hdr(skb));
119 /* Unusual, but possible case. */
/*
 * Grows the skb's tailroom by "extra" bytes by copying into a larger
 * buffer.  Refuses to exceed 65535 bytes (max IPv4 total length).
 *
 * NOTE(review): truncated -- the opening/closing braces, the failure
 * returns, the *pskb replacement and kfree of the old skb are not
 * visible; verify against upstream.
 */
120 static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
122 struct sk_buff *nskb;
124 if ((*pskb)->len + extra > 65535)
/* GFP_ATOMIC: we run in packet-processing (softirq) context. */
127 nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
131 /* Transfer socket to new skb. */
133 skb_set_owner_w(nskb, (*pskb)->sk);
139 /* Generic function for mangling variable-length address changes inside
140 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
143 * Takes care about all the nasty sequence number changes, checksumming,
144 * skb enlargement, ...
/*
 * Rewrites match_len bytes of the TCP payload with rep_buffer/rep_len,
 * fixes the TCP checksum, and if the size changed, records a sequence
 * offset and notifies TCP window tracking.
 *
 * NOTE(review): truncated -- the "ct" parameter, local declarations
 * (iph, tcph, oldlen, datalen), several early "return 0" lines, the
 * final "return 1" and closing braces are not visible; verify against
 * upstream before relying on this text.
 */
148 nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
150 enum ip_conntrack_info ctinfo,
151 unsigned int match_offset,
152 unsigned int match_len,
153 const char *rep_buffer,
154 unsigned int rep_len)
/* Ensure the whole packet is linear and writable before raw edits. */
160 if (!skb_make_writable(pskb, (*pskb)->len))
/* Grow tailroom only when the replacement is bigger and doesn't fit. */
163 if (rep_len > match_len &&
164 rep_len - match_len > skb_tailroom(*pskb) &&
165 !enlarge_skb(pskb, rep_len - match_len))
168 SKB_LINEAR_ASSERT(*pskb);
171 tcph = (void *)iph + iph->ihl*4;
/* oldlen/datalen are the TCP segment length (header + payload) before
 * and after the mangle; used for checksum adjustment below. */
173 oldlen = (*pskb)->len - iph->ihl*4;
174 mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
175 match_offset, match_len, rep_buffer, rep_len);
177 datalen = (*pskb)->len - iph->ihl*4;
/* Full recompute when hardware won't finish the checksum; otherwise
 * just patch the length field's contribution incrementally. */
178 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
180 tcph->check = tcp_v4_check(datalen,
181 iph->saddr, iph->daddr,
182 csum_partial((char *)tcph,
185 nf_proto_csum_replace2(&tcph->check, *pskb,
186 htons(oldlen), htons(datalen), 1);
/* Payload size changed: remember the seq offset for this direction so
 * later packets (and peers' acks) can be adjusted. */
188 if (rep_len != match_len) {
189 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
190 adjust_tcp_sequence(ntohl(tcph->seq),
191 (int)rep_len - (int)match_len,
193 /* Tell TCP window tracking about seq change */
194 nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb),
195 ct, CTINFO2DIR(ctinfo));
199 EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
201 /* Generic function for mangling variable-length address changes inside
202 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
203 * command in the Amanda protocol)
205 * Takes care about all the nasty sequence number changes, checksumming,
206 * skb enlargement, ...
208 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
209 * should be fairly easy to do.
/*
 * UDP analogue of nf_nat_mangle_tcp_packet: rewrites part of the UDP
 * payload, updates udph->len and the UDP checksum.  No sequence
 * adjustment is needed for UDP.
 *
 * NOTE(review): truncated -- the "ct" parameter, local declarations,
 * early returns, an "else" branch and closing braces are not visible;
 * verify against upstream.
 */
212 nf_nat_mangle_udp_packet(struct sk_buff **pskb,
214 enum ip_conntrack_info ctinfo,
215 unsigned int match_offset,
216 unsigned int match_len,
217 const char *rep_buffer,
218 unsigned int rep_len)
224 /* UDP helpers might accidentally mangle the wrong packet */
/* Bounds check: the matched region must lie entirely inside the packet. */
226 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
227 match_offset + match_len)
230 if (!skb_make_writable(pskb, (*pskb)->len))
233 if (rep_len > match_len &&
234 rep_len - match_len > skb_tailroom(*pskb) &&
235 !enlarge_skb(pskb, rep_len - match_len))
239 udph = (void *)iph + iph->ihl*4;
241 oldlen = (*pskb)->len - iph->ihl*4;
242 mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
243 match_offset, match_len, rep_buffer, rep_len);
245 /* update the length of the UDP packet */
246 datalen = (*pskb)->len - iph->ihl*4;
247 udph->len = htons(datalen);
249 /* fix udp checksum if udp checksum was previously calculated */
/* A zero UDP checksum means "no checksum" on the wire (RFC 768), so a
 * packet sent without one needs no fixup. */
250 if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL)
253 if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
255 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
256 datalen, IPPROTO_UDP,
257 csum_partial((char *)udph,
/* A computed checksum of 0 must be transmitted as 0xFFFF, since 0 is
 * reserved for "no checksum". */
260 udph->check = CSUM_MANGLED_0;
262 nf_proto_csum_replace2(&udph->check, *pskb,
263 htons(oldlen), htons(datalen), 1);
267 EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
269 /* Adjust one found SACK option including checksum correction */
/*
 * Walks the SACK blocks in [sackoff, sackend) and shifts each edge by
 * the recorded NAT offset: edges past correction_pos (after undoing the
 * old offset) get offset_after, earlier ones get offset_before.  The TCP
 * checksum is patched incrementally for each rewritten 32-bit field.
 *
 * NOTE(review): truncated -- the "static void"/tcph parameter lines,
 * "else" lines between the paired htonl() expressions and closing braces
 * are not visible; verify against upstream.
 */
271 sack_adjust(struct sk_buff *skb,
273 unsigned int sackoff,
274 unsigned int sackend,
275 struct nf_nat_seq *natseq)
277 while (sackoff < sackend) {
278 struct tcp_sack_block_wire *sack;
279 __be32 new_start_seq, new_end_seq;
281 sack = (void *)skb->data + sackoff;
/* Pick the offset that was in force at this block's start edge. */
282 if (after(ntohl(sack->start_seq) - natseq->offset_before,
283 natseq->correction_pos))
284 new_start_seq = htonl(ntohl(sack->start_seq)
285 - natseq->offset_after);
287 new_start_seq = htonl(ntohl(sack->start_seq)
288 - natseq->offset_before);
/* Same decision independently for the end edge. */
290 if (after(ntohl(sack->end_seq) - natseq->offset_before,
291 natseq->correction_pos))
292 new_end_seq = htonl(ntohl(sack->end_seq)
293 - natseq->offset_after);
295 new_end_seq = htonl(ntohl(sack->end_seq)
296 - natseq->offset_before);
298 DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
299 ntohl(sack->start_seq), new_start_seq,
300 ntohl(sack->end_seq), new_end_seq);
/* Patch checksum for both 32-bit replacements before storing them. */
302 nf_proto_csum_replace4(&tcph->check, skb,
303 sack->start_seq, new_start_seq, 0);
304 nf_proto_csum_replace4(&tcph->check, skb,
305 sack->end_seq, new_end_seq, 0);
306 sack->start_seq = new_start_seq;
307 sack->end_seq = new_end_seq;
308 sackoff += sizeof(*sack);
312 /* TCP SACK sequence number adjustment */
/*
 * Scans the TCP option area for a well-formed SACK option and hands it
 * to sack_adjust() with the reverse direction's offset record (SACK
 * blocks acknowledge the peer's sequence space).
 *
 * NOTE(review): truncated -- the tcph/ct parameters, EOL/NOP option
 * handling, the optoff advance and the return statements are not
 * visible; verify against upstream.
 */
313 static inline unsigned int
314 nf_nat_sack_adjust(struct sk_buff **pskb,
317 enum ip_conntrack_info ctinfo)
319 unsigned int dir, optoff, optend;
320 struct nf_conn_nat *nat = nfct_nat(ct);
/* Option area spans from just past the fixed TCP header to doff*4. */
322 optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr);
323 optend = ip_hdrlen(*pskb) + tcph->doff * 4;
325 if (!skb_make_writable(pskb, optend))
328 dir = CTINFO2DIR(ctinfo);
330 while (optoff < optend) {
331 /* Usually: option, length. */
332 unsigned char *op = (*pskb)->data + optoff;
341 /* no partial options */
342 if (optoff + 1 == optend ||
343 optoff + op[1] > optend ||
/* Accept only a SACK option whose length is 2 + a whole number of
 * 8-byte sack blocks. */
346 if (op[0] == TCPOPT_SACK &&
347 op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
348 ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
/* Use the other direction's offsets: SACKs refer to peer seq space. */
349 sack_adjust(*pskb, tcph, optoff+2,
351 &nat->info.seq[!dir]);
358 /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
/*
 * Applies the recorded NAT offsets to this packet's seq (this direction)
 * and ack (reverse direction, subtracted), patches the TCP checksum,
 * then fixes any SACK options and updates conntrack's TCP window state.
 *
 * NOTE(review): truncated -- the "ct" parameter, tcph/dir declarations,
 * "else" lines between the paired assignments, "tcph->seq = newseq;",
 * the return statements and braces are not visible; verify against
 * upstream.
 */
360 nf_nat_seq_adjust(struct sk_buff **pskb,
362 enum ip_conntrack_info ctinfo)
366 __be32 newseq, newack;
367 struct nf_conn_nat *nat = nfct_nat(ct);
368 struct nf_nat_seq *this_way, *other_way;
370 dir = CTINFO2DIR(ctinfo);
372 this_way = &nat->info.seq[dir];
373 other_way = &nat->info.seq[!dir];
/* Only the IP + fixed TCP header needs to be writable here. */
375 if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph)))
378 tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb);
/* Outgoing seq: add the offset in force at this sequence position. */
379 if (after(ntohl(tcph->seq), this_way->correction_pos))
380 newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
382 newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
/* Incoming ack: subtract the reverse direction's offset (undo the old
 * offset first to compare against correction_pos). */
384 if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
385 other_way->correction_pos))
386 newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
388 newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
/* Incrementally patch the checksum for both 32-bit replacements. */
390 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
391 nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);
393 DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
394 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
398 tcph->ack_seq = newack;
400 if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo))
403 nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, dir);
407 EXPORT_SYMBOL(nf_nat_seq_adjust);
409 /* Setup NAT on this expected conntrack so it follows master. */
410 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
/*
 * Sets up SNAT and DNAT on a freshly expected connection so that it is
 * translated consistently with its master connection: source mapped to
 * where the master sends, destination (addr + expected port) mapped to
 * where the master receives.
 *
 * NOTE(review): truncated -- the opening/closing braces of the function
 * body are not visible; verify against upstream.
 */
411 void nf_nat_follow_master(struct nf_conn *ct,
412 struct nf_conntrack_expect *exp)
414 struct nf_nat_range range;
416 /* This must be a fresh one. */
/* NAT may only be configured once per conntrack. */
417 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
419 /* Change src to where master sends to */
/* min_ip == max_ip: a single-address "range" pinning the mapping. */
420 range.flags = IP_NAT_RANGE_MAP_IPS;
421 range.min_ip = range.max_ip
422 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
423 /* hook doesn't matter, but it has to do source manip */
424 nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
426 /* For DST manip, map port here to where it's expected. */
/* saved_proto carries the port the helper expected the data
 * connection on. */
427 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
428 range.min = range.max = exp->saved_proto;
429 range.min_ip = range.max_ip
430 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
431 /* hook doesn't matter, but it has to do destination manip */
432 nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
434 EXPORT_SYMBOL(nf_nat_follow_master);