/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
 *	- add support for SACK adjustment
 * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
 *	- merge SACK support into newnat API
 * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
 *	- make ip_nat_resize_packet more generic (TCP and UDP)
 *	- add ip_nat_mangle_udp_packet
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>

#define ASSERT_READ_LOCK(x)
#define ASSERT_WRITE_LOCK(x)

#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#include <linux/netfilter_ipv4/ip_nat.h>
#include <linux/netfilter_ipv4/ip_nat_protocol.h>
#include <linux/netfilter_ipv4/ip_nat_core.h>
#include <linux/netfilter_ipv4/ip_nat_helper.h>
#include <linux/netfilter_ipv4/listhelp.h>

/* Flip to #if 1 for verbose debugging of seq-offset bookkeeping. */
#if 0
#define DEBUGP printk
#define DUMP_OFFSET(x)	printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
#else
#define DEBUGP(format, args...)
#define DUMP_OFFSET(x)
#endif
/* Serializes updates to ct->nat.info.seq[] made by adjust_tcp_sequence(). */
static DEFINE_SPINLOCK(ip_nat_seqofs_lock);
52 /* Setup TCP sequence correction given this change at this sequence */
54 adjust_tcp_sequence(u32 seq,
56 struct ip_conntrack *ct,
57 enum ip_conntrack_info ctinfo)
60 struct ip_nat_seq *this_way, *other_way;
62 DEBUGP("ip_nat_resize_packet: old_size = %u, new_size = %u\n",
63 (*skb)->len, new_size);
65 dir = CTINFO2DIR(ctinfo);
67 this_way = &ct->nat.info.seq[dir];
68 other_way = &ct->nat.info.seq[!dir];
70 DEBUGP("ip_nat_resize_packet: Seq_offset before: ");
71 DUMP_OFFSET(this_way);
73 spin_lock_bh(&ip_nat_seqofs_lock);
75 /* SYN adjust. If it's uninitialized, or this is after last
76 * correction, record it: we don't handle more than one
77 * adjustment in the window, but do deal with common case of a
79 if (this_way->offset_before == this_way->offset_after
80 || before(this_way->correction_pos, seq)) {
81 this_way->correction_pos = seq;
82 this_way->offset_before = this_way->offset_after;
83 this_way->offset_after += sizediff;
85 spin_unlock_bh(&ip_nat_seqofs_lock);
87 DEBUGP("ip_nat_resize_packet: Seq_offset after: ");
88 DUMP_OFFSET(this_way);
91 /* Frobs data inside this packet, which is linear. */
92 static void mangle_contents(struct sk_buff *skb,
94 unsigned int match_offset,
95 unsigned int match_len,
96 const char *rep_buffer,
101 BUG_ON(skb_is_nonlinear(skb));
102 data = (unsigned char *)skb->nh.iph + dataoff;
104 /* move post-replacement */
105 memmove(data + match_offset + rep_len,
106 data + match_offset + match_len,
107 skb->tail - (data + match_offset + match_len));
109 /* insert data from buffer */
110 memcpy(data + match_offset, rep_buffer, rep_len);
112 /* update skb info */
113 if (rep_len > match_len) {
114 DEBUGP("ip_nat_mangle_packet: Extending packet by "
115 "%u from %u bytes\n", rep_len - match_len,
117 skb_put(skb, rep_len - match_len);
119 DEBUGP("ip_nat_mangle_packet: Shrinking packet from "
120 "%u from %u bytes\n", match_len - rep_len,
122 __skb_trim(skb, skb->len + rep_len - match_len);
125 /* fix IP hdr checksum information */
126 skb->nh.iph->tot_len = htons(skb->len);
127 ip_send_check(skb->nh.iph);
130 /* Unusual, but possible case. */
131 static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
133 struct sk_buff *nskb;
135 if ((*pskb)->len + extra > 65535)
138 nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
142 /* Transfer socket to new skb. */
144 skb_set_owner_w(nskb, (*pskb)->sk);
150 /* Generic function for mangling variable-length address changes inside
151 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
154 * Takes care about all the nasty sequence number changes, checksumming,
155 * skb enlargement, ...
159 ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
160 struct ip_conntrack *ct,
161 enum ip_conntrack_info ctinfo,
162 unsigned int match_offset,
163 unsigned int match_len,
164 const char *rep_buffer,
165 unsigned int rep_len)
171 if (!skb_make_writable(pskb, (*pskb)->len))
174 if (rep_len > match_len
175 && rep_len - match_len > skb_tailroom(*pskb)
176 && !enlarge_skb(pskb, rep_len - match_len))
179 SKB_LINEAR_ASSERT(*pskb);
181 iph = (*pskb)->nh.iph;
182 tcph = (void *)iph + iph->ihl*4;
184 mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
185 match_offset, match_len, rep_buffer, rep_len);
187 datalen = (*pskb)->len - iph->ihl*4;
189 tcph->check = tcp_v4_check(tcph, datalen, iph->saddr, iph->daddr,
190 csum_partial((char *)tcph, datalen, 0));
192 if (rep_len != match_len) {
193 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
194 adjust_tcp_sequence(ntohl(tcph->seq),
195 (int)rep_len - (int)match_len,
197 /* Tell TCP window tracking about seq change */
198 ip_conntrack_tcp_update(*pskb, ct, CTINFO2DIR(ctinfo));
203 /* Generic function for mangling variable-length address changes inside
204 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
205 * command in the Amanda protocol)
207 * Takes care about all the nasty sequence number changes, checksumming,
208 * skb enlargement, ...
210 * XXX - This function could be merged with ip_nat_mangle_tcp_packet which
211 * should be fairly easy to do.
214 ip_nat_mangle_udp_packet(struct sk_buff **pskb,
215 struct ip_conntrack *ct,
216 enum ip_conntrack_info ctinfo,
217 unsigned int match_offset,
218 unsigned int match_len,
219 const char *rep_buffer,
220 unsigned int rep_len)
225 /* UDP helpers might accidentally mangle the wrong packet */
226 iph = (*pskb)->nh.iph;
227 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
228 match_offset + match_len)
231 if (!skb_make_writable(pskb, (*pskb)->len))
234 if (rep_len > match_len
235 && rep_len - match_len > skb_tailroom(*pskb)
236 && !enlarge_skb(pskb, rep_len - match_len))
239 iph = (*pskb)->nh.iph;
240 udph = (void *)iph + iph->ihl*4;
241 mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
242 match_offset, match_len, rep_buffer, rep_len);
244 /* update the length of the UDP packet */
245 udph->len = htons((*pskb)->len - iph->ihl*4);
247 /* fix udp checksum if udp checksum was previously calculated */
249 int datalen = (*pskb)->len - iph->ihl * 4;
251 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
252 datalen, IPPROTO_UDP,
253 csum_partial((char *)udph,
260 /* Adjust one found SACK option including checksum correction */
262 sack_adjust(struct sk_buff *skb,
264 unsigned int sackoff,
265 unsigned int sackend,
266 struct ip_nat_seq *natseq)
268 while (sackoff < sackend) {
269 struct tcp_sack_block *sack;
270 u_int32_t new_start_seq, new_end_seq;
272 sack = (void *)skb->data + sackoff;
273 if (after(ntohl(sack->start_seq) - natseq->offset_before,
274 natseq->correction_pos))
275 new_start_seq = ntohl(sack->start_seq)
276 - natseq->offset_after;
278 new_start_seq = ntohl(sack->start_seq)
279 - natseq->offset_before;
280 new_start_seq = htonl(new_start_seq);
282 if (after(ntohl(sack->end_seq) - natseq->offset_before,
283 natseq->correction_pos))
284 new_end_seq = ntohl(sack->end_seq)
285 - natseq->offset_after;
287 new_end_seq = ntohl(sack->end_seq)
288 - natseq->offset_before;
289 new_end_seq = htonl(new_end_seq);
291 DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
292 ntohl(sack->start_seq), new_start_seq,
293 ntohl(sack->end_seq), new_end_seq);
296 ip_nat_cheat_check(~sack->start_seq, new_start_seq,
297 ip_nat_cheat_check(~sack->end_seq,
300 sack->start_seq = new_start_seq;
301 sack->end_seq = new_end_seq;
302 sackoff += sizeof(*sack);
306 /* TCP SACK sequence number adjustment */
307 static inline unsigned int
308 ip_nat_sack_adjust(struct sk_buff **pskb,
310 struct ip_conntrack *ct,
311 enum ip_conntrack_info ctinfo)
313 unsigned int dir, optoff, optend;
315 optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
316 optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
318 if (!skb_make_writable(pskb, optend))
321 dir = CTINFO2DIR(ctinfo);
323 while (optoff < optend) {
324 /* Usually: option, length. */
325 unsigned char *op = (*pskb)->data + optoff;
334 /* no partial options */
335 if (optoff + 1 == optend
336 || optoff + op[1] > optend
339 if (op[0] == TCPOPT_SACK
340 && op[1] >= 2+TCPOLEN_SACK_PERBLOCK
341 && ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
342 sack_adjust(*pskb, tcph, optoff+2,
344 &ct->nat.info.seq[!dir]);
351 /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
353 ip_nat_seq_adjust(struct sk_buff **pskb,
354 struct ip_conntrack *ct,
355 enum ip_conntrack_info ctinfo)
358 int dir, newseq, newack;
359 struct ip_nat_seq *this_way, *other_way;
361 dir = CTINFO2DIR(ctinfo);
363 this_way = &ct->nat.info.seq[dir];
364 other_way = &ct->nat.info.seq[!dir];
366 if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
369 tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
370 if (after(ntohl(tcph->seq), this_way->correction_pos))
371 newseq = ntohl(tcph->seq) + this_way->offset_after;
373 newseq = ntohl(tcph->seq) + this_way->offset_before;
374 newseq = htonl(newseq);
376 if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
377 other_way->correction_pos))
378 newack = ntohl(tcph->ack_seq) - other_way->offset_after;
380 newack = ntohl(tcph->ack_seq) - other_way->offset_before;
381 newack = htonl(newack);
383 tcph->check = ip_nat_cheat_check(~tcph->seq, newseq,
384 ip_nat_cheat_check(~tcph->ack_seq,
388 DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
389 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
393 tcph->ack_seq = newack;
395 if (!ip_nat_sack_adjust(pskb, tcph, ct, ctinfo))
398 ip_conntrack_tcp_update(*pskb, ct, dir);
403 /* Setup NAT on this expected conntrack so it follows master. */
404 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
405 void ip_nat_follow_master(struct ip_conntrack *ct,
406 struct ip_conntrack_expect *exp)
408 struct ip_nat_range range;
410 /* This must be a fresh one. */
411 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
413 /* Change src to where master sends to */
414 range.flags = IP_NAT_RANGE_MAP_IPS;
415 range.min_ip = range.max_ip
416 = ct->master->tuplehash[!exp->dir].tuple.dst.ip;
417 /* hook doesn't matter, but it has to do source manip */
418 ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
420 /* For DST manip, map port here to where it's expected. */
421 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
422 range.min = range.max = exp->saved_proto;
423 range.min_ip = range.max_ip
424 = ct->master->tuplehash[!exp->dir].tuple.src.ip;
425 /* hook doesn't matter, but it has to do destination manip */
426 ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);