/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
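
/*
 * Illustrative note (not from the original file): iph->ihl counts
 * 32-bit words, so ip_fast_csum() takes the header length in words;
 * the usual option-less header has ihl == 5, i.e. 20 bytes.  Any code
 * that rewrites a header field afterwards (e.g. decrementing the TTL
 * on forward) must call ip_send_check() again to refresh iph->check.
 */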
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL)
		return xfrm4_output_finish(skb);
#endif
	if (skb->len > dst_mtu(skb->dst) &&
	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
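
/*
 * Note (summary, not from the original file): an oversized skb is
 * software-fragmented here only when neither UFO nor TSO is set;
 * otherwise it is passed on whole and segmentation is left to the
 * device.
 */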
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   returned after forwarding; they will be dropped by
		   ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
		       ip_finish_output);
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
		       ip_finish_output);
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
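
/*
 * Worked example (not from the original file) for the combined
 * first-word store in ip_queue_xmit() above: version (4 bits),
 * ihl (4 bits) and tos (8 bits) share the first 16 bits of the
 * header, so with version 4 and ihl 5
 *
 *	htons((4 << 12) | (5 << 8) | tos) == htons(0x4500 | tos)
 *
 * writes all three fields in one store; when options are present,
 * iph->ihl is then bumped by optlen>>2 32-bit words.
 */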
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
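
/*
 * Worked example (illustrative figures, not from the original file):
 * with a 1500-byte MTU and a 20-byte header, ip_fragment() above
 * leaves mtu = 1480 bytes of payload per fragment.  1480 is already
 * a multiple of 8, so a 4000-byte payload splits as 1480 + 1480 +
 * 1040, with iph->frag_off carrying offsets 0, 185 and 370 (in
 * 8-byte units) and IP_MF set on all but the last fragment.
 */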
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);
	return err;
}
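
/*
 * Note (summary, not from the original file): with UFO the stack
 * queues one oversized UDP datagram as a single skb; the ufo_size
 * set above tells the driver the payload size of each on-the-wire
 * fragment, so the NIC performs the split that ip_fragment() would
 * otherwise do in software.
 */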
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
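
/*
 * Usage sketch (hypothetical caller, not taken from any in-tree
 * user; assumes a routed rt and a filled-in ipcm_cookie):
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 *
 * i.e. a datagram protocol corks data on the socket write queue and
 * later emits it as one IP datagram.
 */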
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		if (ip_ufo_append_data(sk, getfrag, from, length, hh_len,
				       fragheaderlen, transhdrlen, mtu, flags))
			goto error;

		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO))
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);

	while (size > 0) {
		int i;

		if (skb_shinfo(skb)->ufo_size)
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
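
/*
 * Note (summary, not from the original file): the queued skbs are
 * merged below by chaining them onto the first skb's frag_list, so
 * a single datagram reaches netfilter and dst_output() intact; if
 * it exceeds the MTU, ip_fragment()'s fast path can later split it
 * back along these pre-built boundaries without copying.
 */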
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	int df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented.  No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	if (!df) {
		__ip_select_ident(iph, &rt->u.dst, 0);
	} else {
		iph->id = htons(inet->id++);
	}
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single-threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BHs locally disabled and that sk cannot already be
	   spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);