/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>

#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (dccp_service_not_initialized(sk))

	if (addr_len < sizeof(struct sockaddr_in))

	if (usin->sin_family != AF_INET)

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt != NULL && inet->opt->srr) {
		nexthop = inet->opt->faddr;

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       inet->sport, usin->sin_port, sk);

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {

	if (inet->opt == NULL || !inet->opt->srr)

	inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	inet->dport = usin->sin_port;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt != NULL)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
	/*
	 * Socket identity is still unknown (sport may be zero).
	 * However, we set the state to DCCP_REQUESTING and, without releasing
	 * the socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet_hash_connect(&dccp_death_row, sk);

	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	dp->dccps_iss = secure_dccp_sequence_number(inet->saddr,

	dccp_update_gss(sk, dp->dccps_iss);

	inet->id = dp->dccps_iss ^ jiffies;

	err = dccp_connect(sk);

	/*
	 * This unhashes the socket and releases the local port, if necessary.
	 */
	dccp_set_state(sk, DCCP_CLOSED);

	sk->sk_route_caps = 0;

EXPORT_SYMBOL_GPL(dccp_v4_connect);
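
/*
 * Hedged usage sketch (userspace, not part of this file): a minimal IPv4 DCCP
 * client driving the path above, assuming the SOCK_DCCP socket type and the
 * DCCP_SOCKOPT_SERVICE option (what dccp_service_not_initialized() checks for)
 * are available:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);		// hypothetical service code
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));
 *	// connect() ends up in dccp_v4_connect(): it routes the destination,
 *	// picks a source port via inet_hash_connect() and sends the Request.
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 */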
/*
 * This routine does path MTU discovery as defined in RFC 1191.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)

	/* We don't check in the destination entry if PMTU discovery is
	 * forbidden on this route. We just assume that no packet-too-big
	 * packets are sent back when PMTU discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From: draft-ietf-dccp-spec-11.txt
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk
		 *	application data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
	} /* else let the usual retransmit timer handle it */
}
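
/*
 * Rough arithmetic behind dccp_sync_mss() above (a sketch under the usual
 * header-size assumptions; the authoritative computation lives in
 * dccp_sync_mss()): with a path MTU of 1500 bytes, about
 *
 *	1500 - 20 (IPv4 header) - 12 (generic DCCP header)
 *	     - 4 (48-bit sequence number extension) - IP options/CCID overhead
 *
 * bytes remain for application payload, so a smaller MTU reported via ICMP
 * directly shrinks the maximum packet size handed to the CCID.
 */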
static void dccp_v4_ctl_send_ack(struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);

	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)

	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	skb->dst = dst_clone(rxskb->dst);

	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);

	memset(dh, 0, dccp_hdr_ack_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_ACK;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_ack_len / 4;

	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	bh_lock_sock(dccp_ctl_socket->sk);
	err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
				    rxskb->nh.iph->daddr,
				    rxskb->nh.iph->saddr, NULL);
	bh_unlock_sock(dccp_ctl_socket->sk);

	if (err == NET_XMIT_CN || err == 0) {
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
static void dccp_v4_reqsk_send_ack(struct sk_buff *skb,
				   struct request_sock *req)
{
	dccp_v4_ctl_send_ack(skb);
}
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	/* First, grab a route. */
	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)

	skb = dccp_make_response(sk, dst, req);

	const struct inet_request_sock *ireq = inet_rsk(req);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,

	if (err == NET_XMIT_CN)
/*
 * This routine is called by the ICMP module when it gets some sort of error
 * condition. If err < 0 then the socket should be closed and the error
 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
 * After adjustment header points to the first 8 bytes of the DCCP header. We
 * need to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When someone else
 * accesses the socket the ICMP is just dropped and for some paths there is no
 * check at all. A more general error queue to queue errors for later handling
 * is probably better.
 */
void dccp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data +

	struct dccp_sock *dp;
	struct inet_sock *inet;
	const int type = skb->h.icmph->type;
	const int code = skb->h.icmph->code;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);

	sk = inet_lookup(&dccp_hashinfo, iph->daddr, dh->dccph_dport,
			 iph->saddr, dh->dccph_sport, inet_iif(skb));

		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)

	seq = dccp_hdr_seq(skb);
	if (sk->sk_state != DCCP_LISTEN &&
	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */

	case ICMP_PARAMETERPROB:

	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC 1191) */
			if (!sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);

		err = icmp_err_convert[code].errno;

	case ICMP_TIME_EXCEEDED:

	switch (sk->sk_state) {
		struct request_sock *req, **prev;

		if (sock_owned_by_user(sk))

		req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
					  iph->daddr, iph->saddr);

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);

		/*
		 * Still in RESPOND, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);

	case DCCP_REQUESTING:

		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);

			sk->sk_error_report(sk);

		sk->sk_err_soft = err;
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by PMTU discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */
	if (!sock_owned_by_user(sk) && inet->recverr) {

		sk->sk_error_report(sk);
	} else /* Only an error on timeout */
		sk->sk_err_soft = err;
/* This routine computes an IPv4 DCCP checksum. */
void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr);
}

EXPORT_SYMBOL_GPL(dccp_v4_send_check);
static inline u64 dccp_v4_init_sequence(const struct sock *sk,
					const struct sk_buff *skb)
{
	return secure_dccp_sequence_number(skb->nh.iph->daddr,
					   skb->nh.iph->saddr,
					   dccp_hdr(skb)->dccph_dport,
					   dccp_hdr(skb)->dccph_sport);
}
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;

	struct request_sock *req;
	struct dccp_request_sock *dreq;
	const __be32 saddr = skb->nh.iph->saddr;
	const __be32 daddr = skb->nh.iph->daddr;
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

	/* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
		reset_code = DCCP_RESET_CODE_NO_CONNECTION;

	if (dccp_bad_service_code(sk, service)) {
		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;

	/*
	 * TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk))

	/*
	 * Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop this request. It is better than
	 * clogging the syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)

	req = reqsk_alloc(sk->sk_prot->rsk_prot);

	if (dccp_parse_options(sk, skb))

	dccp_openreq_init(req, &dp, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	req->rcv_wnd = 100; /* Fake, option parsing will get the
			       right value */

	/*
	 * Step 3: Process LISTEN state
	 *
	 *	Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v4_init_sequence(sk, skb);
	dreq->dreq_service = service;

	if (dccp_v4_send_response(sk, req, NULL))

	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);

	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	dcb->dccpd_reset_code = reset_code;

EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
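
/*
 * Hedged usage sketch (userspace, not part of this file): the listening side
 * whose incoming DCCP-Requests end up in dccp_v4_conn_request(), assuming the
 * same SOCK_DCCP / DCCP_SOCKOPT_SERVICE interface as in the client sketch:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);		// hypothetical service code
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(fd, 5);
 *	// The Request is answered with a Response by dccp_v4_send_response();
 *	// once the handshake completes, accept() returns the child socket
 *	// created in dccp_v4_request_recv_sock().
 *	int newfd = accept(fd, NULL, NULL);
 */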
/*
 * The three-way handshake has completed - we got a valid ACK or DATAACK -
 * now create the new socket.
 *
 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 */
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct dccp_sock *newdp;

	if (sk_acceptq_is_full(sk))

	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)

	newsk = dccp_create_openreq_child(sk, req, skb);

	sk_setup_caps(newsk, dst);

	newdp		   = dccp_sk(newsk);
	newinet		   = inet_sk(newsk);
	ireq		   = inet_rsk(req);
	newinet->daddr	   = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr	   = ireq->loc_addr;
	newinet->opt	   = ireq->opt;

	newinet->mc_index  = inet_iif(skb);
	newinet->mc_ttl	   = skb->nh.iph->ttl;
	newinet->id	   = jiffies;

	dccp_sync_mss(newsk, dst_mtu(dst));

	__inet_hash(&dccp_hashinfo, newsk, 0);
	__inet_inherit_port(&dccp_hashinfo, sk, newsk);

	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);

	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);

EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct iphdr *iph = skb->nh.iph;

	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev,

						       iph->saddr, iph->daddr);

		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&dccp_hashinfo,
					iph->saddr, dh->dccph_sport,
					iph->daddr, ntohs(dh->dccph_dport),

	if (nsk->sk_state != DCCP_TIME_WAIT) {

	inet_twsk_put((struct inet_timewait_sock *)nsk);
int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr,
		     const __be32 daddr)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	if (dh->dccph_cscov == 0)
		checksum_len = skb->len;
	else {
		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
		checksum_len = checksum_len < skb->len ? checksum_len :
							 skb->len;
	}

	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
	return csum_tcpudp_magic(saddr, daddr, checksum_len,
				 IPPROTO_DCCP, tmp);
}

static int dccp_v4_verify_checksum(struct sk_buff *skb,
				   const __be32 saddr, const __be32 daddr)
{
	struct dccp_hdr *dh = dccp_hdr(skb);

	if (dh->dccph_cscov == 0)
		checksum_len = skb->len;
	else {
		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
		checksum_len = checksum_len < skb->len ? checksum_len :
							 skb->len;
	}

	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
	return csum_tcpudp_magic(saddr, daddr, checksum_len,
				 IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
}
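
/*
 * Worked example of the coverage computation above, using this file's formula:
 * for a packet with 48-bit sequence numbers (dccph_x == 1) and
 * dccph_cscov == 4,
 *
 *	checksum_len = (4 + 1) * sizeof(u32) = 20 bytes,
 *
 * capped at skb->len; dccph_cscov == 0 keeps the checksum over the whole
 * packet. Partial coverage is what allows DCCP to hand applications payload
 * that arrived damaged, as long as the covered header area still checks out.
 */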
static struct dst_entry *dccp_v4_route_skb(struct sock *sk,
					   struct sk_buff *skb)
{
	struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif,

			    { .daddr = skb->nh.iph->saddr,
			      .saddr = skb->nh.iph->daddr,
			      .tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,

			    { .sport = dccp_hdr(skb)->dccph_dport,
			      .dport = dccp_hdr(skb)->dccph_sport }

	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);

	struct dst_entry *dst;

	/* Never send a reset in response to a reset. */
	if (rxdh->dccph_type == DCCP_PKT_RESET)

	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)

	dst = dccp_v4_route_skb(dccp_ctl_socket->sk, rxskb);

	skb = alloc_skb(MAX_DCCP_HEADER + 15, GFP_ATOMIC);

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->dst = dst_clone(dst);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);

	memset(dh, 0, dccp_hdr_reset_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;

	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */

	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
					      rxskb->nh.iph->daddr);

	bh_lock_sock(dccp_ctl_socket->sk);
	err = ip_build_and_send_pkt(skb, dccp_ctl_socket->sk,
				    rxskb->nh.iph->daddr,
				    rxskb->nh.iph->saddr, NULL);
	bh_unlock_sock(dccp_ctl_socket->sk);

	if (err == NET_XMIT_CN || err == 0) {
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_hdr *dh = dccp_hdr(skb);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dh, skb->len))

	/*
	 *  Step 3: Process LISTEN state
	 *	If S.state == LISTEN,
	 *	   If P.type == Request or P contains a valid Init Cookie
	 *	   option,
	 *	      * Must scan the packet's options to check for an Init
	 *		Cookie.  Only the Init Cookie is processed here,
	 *		however; other options are processed in Step 8.  This
	 *		scan need only be performed if the endpoint uses Init
	 *		Cookies *
	 *	      * Generate a new socket and switch to that socket *
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookie
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *	      Continue with S.state == RESPOND
	 *	      * A Response packet will be generated in Step 11 *
	 *	   Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 *  NOTE: the check for the packet types is done in
	 *	  dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v4_hnd_req(sk, skb);

		if (dccp_child_process(sk, nsk, skb))

	if (dccp_rcv_state_process(sk, skb, dh, skb->len))

	dccp_v4_ctl_send_reset(skb);

EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
int dccp_invalid_packet(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;

	if (skb->pkt_type != PACKET_HOST)

	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n");

	/* If the packet type is not understood, drop packet and return */
	if (dh->dccph_type >= DCCP_PKT_INVALID) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n");

	/*
	 * If P.Data Offset is too small for packet type, or too large for
	 * packet, drop packet and return
	 */
	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "

	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "

	/*
	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
	 * has short sequence numbers), drop packet and return
	 */
	if (dh->dccph_x == 0 &&
	    dh->dccph_type != DCCP_PKT_DATA &&
	    dh->dccph_type != DCCP_PKT_ACK &&
	    dh->dccph_type != DCCP_PKT_DATAACK) {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack "
			       "nor DataAck and P.X == 0\n",
			       dccp_packet_name(dh->dccph_type));

EXPORT_SYMBOL_GPL(dccp_invalid_packet);
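
/*
 * Worked example for the Data Offset checks above (a sketch, assuming the
 * usual header sizes): a DCCP-Request with 48-bit sequence numbers carries
 * 12 (generic header) + 4 (sequence number extension) + 4 (service code) =
 * 20 bytes of fixed header, so dccph_doff must be at least 20 / 4 = 5 words,
 * and dccph_doff * 4 must still fit inside the packet, or it is dropped.
 */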
/* This is called when real data arrives. */
int dccp_v4_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))

	/* If the header checksum is incorrect, drop packet and return */
	if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
				    skb->nh.iph->daddr) < 0) {
		LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n",

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	dccp_pr_debug("%8.8s "
		      "src=%u.%u.%u.%u@%-5d "
		      "dst=%u.%u.%u.%u@%-5d seq=%llu",
		      dccp_packet_name(dh->dccph_type),
		      NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport),
		      NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport),
		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);

	if (dccp_packet_without_ack(skb)) {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
		dccp_pr_debug_cat("\n");
	} else {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
		dccp_pr_debug_cat(", ack=%llu\n",
				  (unsigned long long)
				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
	}

	/*
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet_lookup(&dccp_hashinfo,
			   skb->nh.iph->saddr, dh->dccph_sport,
			   skb->nh.iph->daddr, ntohs(dh->dccph_dport),

	/*
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");

	/*
	 *	... or S.state == TIMEWAIT,
	 *	   Generate Reset(No Connection) unless P.type == Reset
	 *	   Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb);

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))

	/*
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v4_ctl_send_reset(skb);

	inet_twsk_put((struct inet_timewait_sock *)sk);
struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= dccp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v4_conn_request,
	.syn_recv_sock	= dccp_v4_request_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in),
};

static int dccp_v4_init_sock(struct sock *sk)
{
	const int err = dccp_init_sock(sk);

		inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static struct request_sock_ops dccp_request_sock_ops = {

	.obj_size	= sizeof(struct dccp_request_sock),
	.rtx_syn_ack	= dccp_v4_send_response,
	.send_ack	= dccp_v4_reqsk_send_ack,
	.destructor	= dccp_v4_reqsk_destructor,
	.send_reset	= dccp_v4_ctl_send_reset,
};

static struct timewait_sock_ops dccp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
};
struct proto dccp_prot = {

	.owner		= THIS_MODULE,
	.close		= dccp_close,
	.connect	= dccp_v4_connect,
	.disconnect	= dccp_disconnect,
	.ioctl		= dccp_ioctl,
	.init		= dccp_v4_init_sock,
	.setsockopt	= dccp_setsockopt,
	.getsockopt	= dccp_getsockopt,
	.sendmsg	= dccp_sendmsg,
	.recvmsg	= dccp_recvmsg,
	.backlog_rcv	= dccp_v4_do_rcv,

	.unhash		= dccp_unhash,
	.accept		= inet_csk_accept,
	.get_port	= dccp_v4_get_port,
	.shutdown	= dccp_shutdown,
	.destroy	= dccp_destroy_sock,
	.orphan_count	= &dccp_orphan_count,
	.max_header	= MAX_DCCP_HEADER,
	.obj_size	= sizeof(struct dccp_sock),
	.rsk_prot	= &dccp_request_sock_ops,
	.twsk_prot	= &dccp_timewait_sock_ops,
};

EXPORT_SYMBOL_GPL(dccp_prot);
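
/*
 * A sketch of how dccp_prot and dccp_ipv4_af_ops are expected to be wired up
 * at init time (the registration code is not part of this fragment, so the
 * names dccp_protocol and dccp_v4_init below are assumptions):
 *
 *	static struct net_protocol dccp_protocol = {
 *		.handler	= dccp_v4_rcv,
 *		.err_handler	= dccp_v4_err,
 *	};
 *
 *	static int __init dccp_v4_init(void)
 *	{
 *		int err = proto_register(&dccp_prot, 1);
 *
 *		if (err == 0)
 *			err = inet_add_protocol(&dccp_protocol, IPPROTO_DCCP);
 *		return err;
 *	}
 */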