/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
/*
 * Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr;
int sysctl_ip_default_ttl = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
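/*
 * Editorial sketch, not part of the original file: ip_send_check()
 * zeroes iph->check itself, so a caller only needs to invoke it once,
 * after the last header modification.  The helper below is hypothetical
 * and purely illustrative.
 */
static inline void example_rewrite_tos(struct iphdr *iph, __u8 tos)
{
	iph->tos = tos;		/* modify any header field ...	*/
	ip_send_check(iph);	/* ... then refresh the checksum */
}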
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}
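/*
 * Editorial sketch, not part of the original file: callers of
 * ip_build_and_send_pkt() are expected to have attached a route to the
 * skb (skb->dst) and reserved headroom for the IP and link headers.
 * The hypothetical wrapper below only illustrates that contract.
 */
static int example_send_routed_pkt(struct sk_buff *skb, struct sock *sk,
				   u32 saddr, u32 daddr)
{
	/* skb->dst must already point at a valid route; no IP options. */
	return ip_build_and_send_pkt(skb, sk, saddr, daddr, NULL);
}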
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static int ip_finish_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
		       ip_finish_output2);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users.
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback non-local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host. */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	if (skb->len > dst_mtu(&rt->u.dst))
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}
int ip_output(struct sk_buff *skb)
{
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}
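/*
 * Editorial note, not part of the original file: ip_output() is normally
 * reached via dst_output(), i.e. through the output hook installed on
 * the route attached to the skb.  A minimal sketch of that dispatch,
 * assuming an already-routed skb:
 */
static inline int example_dst_output(struct sk_buff *skb)
{
	return skb->dst->output(skb);	/* ip_output() or ip_mc_output() */
}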
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the transport
			 * layer will keep trying until the route appears or the
			 * connection times itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
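/*
 * Editorial sketch, not part of the original file: a connected transport
 * (TCP-style) builds its own header in the skb and then hands it to
 * ip_queue_xmit(), which routes via skb->sk.  Passing ipfragok == 0
 * lets PMTU discovery set DF on the frame.  The helper is hypothetical.
 */
static int example_transport_xmit(struct sock *sk, struct sk_buff *skb)
{
	skb->sk = sk;			/* ip_queue_xmit() routes using skb->sk */
	return ip_queue_xmit(skb, 0);	/* 0: do not force fragmentation on */
}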
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is the same as for the pre-frag packet. */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus
 *	a block of the data of the original IP data part) that will yet fit
 *	in a single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr, offset, not_last_frag, err = 0;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	struct rtable *rt = (struct rtable*)skb->dst;

	dev = rt->u.dst.dev;

	/* Point into the IP datagram header. */
	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Setup starting values. */
	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited.  In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif

	/* Fragment the datagram. */
	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/* Keep copying data until we run out. */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer. */
		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
			err = -ENOMEM;
			goto fail;
		}

		/* Set up data on packet. */
		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/* Charge the memory for the fragment to any owner
		 * it might possess.
		 */
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* Copy the packet header into the new buffer. */
		memcpy(skb2->nh.raw, skb->data, hlen);

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/* Fill in the new header fields. */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/* Added AC : If we are fragmenting a fragment that's not the
		 *	      last fragment then keep MF on each fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
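/*
 * Editorial sketch, not part of the original file: a getfrag callback
 * must copy 'len' bytes of payload starting at 'offset' into 'to', and
 * fold the copied data into skb->csum unless the device checksums in
 * hardware.  A hypothetical variant reading from a flat kernel buffer
 * (mirroring ip_reply_glue_bits further below):
 */
static int example_kernel_getfrag(void *from, char *to, int offset,
				  int len, int odd, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		memcpy(to, (char *)from + offset, len);
	} else {
		unsigned int csum;

		csum = csum_partial_copy_nocheck((char *)from + offset,
						 to, len, 0);
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}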
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * Setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/* Fill in the control structures. */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* Find where to start putting bytes. */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* Put the packet on the pending queue. */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;

	while (size > 0) {
		int i;

		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/* Fill in the control structures. */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* Find where to start putting bytes. */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/* Put the packet on the pending queue. */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
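/*
 * Editorial sketch, not part of the original file: ip_append_page() is
 * the zero-copy (sendfile-style) sibling of ip_append_data(); it only
 * works on devices with scatter/gather (NETIF_F_SG).  A hypothetical
 * caller appending one page of payload to an already-corked socket:
 */
static ssize_t example_append_page(struct sock *sk, struct page *page,
				   int offset, size_t size)
{
	/* The write queue must already hold the first (header) skb,
	 * i.e. ip_append_data() has been called at least once. */
	return ip_append_page(sk, page, offset, size, MSG_MORE);
}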
/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	int df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* Move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here.  No matter what transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	if (!df) {
		__ip_select_ident(iph, &rt->u.dst, 0);
	} else {
		iph->id = htons(inet->id++);
	}
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
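/*
 * Editorial sketch, not part of the original file: the corked-send
 * pattern a UDP-style sender would use with the API above.  The first
 * ip_append_data() call sets up the cork (options, route, MTU); later
 * calls may append more payload; ip_push_pending_frames() builds the
 * final IP header and hands the datagram to netfilter/dst_output().
 * All names and parameters here are illustrative (transhdrlen is 0,
 * i.e. a raw-style send with no transport header).
 */
static int example_corked_send(struct sock *sk, struct iovec *iov,
			       int length, struct ipcm_cookie *ipc,
			       struct rtable *rt)
{
	int err;

	err = ip_append_data(sk, ip_generic_getfrag, iov, length,
			     0, ipc, rt, MSG_DONTWAIT);
	if (err) {
		ip_flush_pending_frames(sk);	/* drop anything queued */
		return err;
	}
	return ip_push_pending_frames(sk);
}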
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.
	 *
	 * This chunk is not reentrant, hence spinlock.
	 * Note that it uses the fact that this function is called
	 * with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
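/*
 * Editorial sketch, not part of the original file: tcp_v4_send_reset()-
 * style usage.  The caller points the reply_arg iovec at a prebuilt
 * header (here a TCP header), seeds arg->csum with the pseudo-header
 * sum, and tells ip_send_reply() where the checksum field lives.  All
 * names here are illustrative.
 */
static void example_send_reply(struct sock *reply_sk, struct sk_buff *rcv_skb,
			       struct tcphdr *th, unsigned int len)
{
	struct ip_reply_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)th;
	arg.iov[0].iov_len = len;
	arg.csum = csum_tcpudp_nofold(rcv_skb->nh.iph->daddr,
				      rcv_skb->nh.iph->saddr,
				      len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(reply_sk, rcv_skb, &arg, len);
}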
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);