/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Andi Kleen :		Replace ip_reply with ip_send_reply.
 *		Andi Kleen :		Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
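/*
 * Usage note (an illustrative sketch, not code from this file): any
 * path that rewrites IP header fields must rebuild the checksum
 * afterwards, e.g. when lowering the TTL:
 *
 *	iph->ttl = new_ttl;	// 'new_ttl' is a hypothetical value
 *	ip_send_check(iph);	// zero and recompute iph->check
 */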
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
	else
		iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
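/*
 * ip_build_and_send_pkt() is meant for callers that already hold a
 * routed skb but no IP header; TCP, for instance, uses it to emit
 * SYN-ACK segments for embryonic connections.
 */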
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
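/*
 * For a locally generated unicast packet the output path thus chains
 * as follows:
 *
 *	dst_output()
 *	    -> ip_output()			(via dst->output)
 *		-> NF_IP_POST_ROUTING hook
 *		    -> ip_finish_output()
 *			-> ip_fragment()	if over the MTU and not GSO
 *			-> ip_finish_output2()	hh cache / neighbour xmit
 */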
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph)	= htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
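/*
 * Typical transport usage (a hypothetical sketch; header construction
 * and error handling elided):
 *
 *	build_transport_header(skb);	// hypothetical helper
 *	err = ip_queue_xmit(skb, 0);	// 0: honour PMTU, DF may be set
 */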
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */
	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */
	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */
		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
#ifdef CONFIG_BRIDGE_NETFILTER
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif

	/*
	 *	Fragment the datagram.
	 */
	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */
		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */
		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
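/*
 * Worked example for the slow path above: with a 1500 byte MTU and a
 * 20 byte header, mtu = 1480 bytes of payload fit per fragment
 * (already a multiple of 8).  A 4000 byte payload thus becomes
 * fragments carrying 1480, 1480 and 1040 bytes with frag_off values
 * 0, 185 and 370 (offsets are stored in 8-byte units), and MF set on
 * all but the last fragment.
 */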
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
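/*
 * ip_generic_getfrag() is the getfrag callback for user iovecs: UDP,
 * for example, passes it to ip_append_data() together with
 * msg->msg_iov, so data is copied (and checksummed, unless the device
 * computes checksums in hardware) straight from user space.
 */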
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* UFO could not be used; clean up and let the caller
	 * handle the error.
	 */
	kfree_skb(skb);
	return err;
}
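/*
 * With UFO the datagram is queued as one oversized skb; gso_size tells
 * the device (or the software GSO fallback) where to cut, so each
 * fragment put on the wire carries mtu - fragheaderlen payload bytes.
 */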
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it will not be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
			(rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each segment is an IP fragment ready for sending to the network
	 * after adding an appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
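/*
 * Usage sketch (hypothetical caller, error handling elided): corked
 * sends accumulate data on sk_write_queue and flush it as one datagram:
 *
 *	ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *		       sizeof(struct udphdr), &ipc, rt, msg->msg_flags);
 *	...					// possibly more appends
 *	ip_push_pending_frames(sk);		// build IP header and send
 */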
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
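/*
 * ip_append_page() is the zero-copy companion of ip_append_data(): the
 * page is attached to the pending skb by reference (this is what makes
 * sendfile() over UDP work), which is why a scatter-gather capable
 * device (NETIF_F_SG) is required and the payload is never copied.
 */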
/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence spinlock.
	   Note that it uses the fact, that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);