[DCCP]: Integrate state transitions for passive-close

diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index f3fc4392e93d74a470c94c40813851e5042d3518..312b989c7edb5ae103782b2a3036d5b92d72939c 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -144,6 +144,8 @@ enum dccp_reset_codes {
        DCCP_RESET_CODE_TOO_BUSY,
        DCCP_RESET_CODE_BAD_INIT_COOKIE,
        DCCP_RESET_CODE_AGGRESSION_PENALTY,
+
+       DCCP_MAX_RESET_CODES            /* Leave at the end!  */
 };
 
 /* DCCP options */
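
The sentinel added above is intended to stay at the end of the enum so that
anything indexed by reset code can be sized from it. A minimal sketch of that
pattern, assuming <linux/dccp.h> is included; the table and helper names are
hypothetical and not part of this patch:

#include <linux/dccp.h>

/* Hypothetical table, sized by the new sentinel; entries other than those
 * visible in the hunk above are omitted for brevity. */
static const char *const example_reset_names[DCCP_MAX_RESET_CODES] = {
	[DCCP_RESET_CODE_TOO_BUSY]           = "Too Busy",
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = "Bad Init Cookie",
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = "Aggression Penalty",
};

static inline const char *example_reset_name(unsigned int code)
{
	/* The sentinel doubles as the bounds check. */
	if (code >= DCCP_MAX_RESET_CODES || !example_reset_names[code])
		return "Unknown";
	return example_reset_names[code];
}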
@@ -225,37 +227,50 @@ struct dccp_so_feat {
 #include <net/tcp_states.h>
 
 enum dccp_state {
-       DCCP_OPEN       = TCP_ESTABLISHED,
-       DCCP_REQUESTING = TCP_SYN_SENT,
-       DCCP_PARTOPEN   = TCP_FIN_WAIT1, /* FIXME:
-                                           This mapping is horrible, but TCP has
-                                           no matching state for DCCP_PARTOPEN,
-                                           as TCP_SYN_RECV is already used by
-                                           DCCP_RESPOND, why don't stop using TCP
-                                           mapping of states? OK, now we don't use
-                                           sk_stream_sendmsg anymore, so doesn't
-                                           seem to exist any reason for us to
-                                           do the TCP mapping here */
-       DCCP_LISTEN     = TCP_LISTEN,
-       DCCP_RESPOND    = TCP_SYN_RECV,
-       DCCP_CLOSING    = TCP_CLOSING,
-       DCCP_TIME_WAIT  = TCP_TIME_WAIT,
-       DCCP_CLOSED     = TCP_CLOSE,
-       DCCP_MAX_STATES = TCP_MAX_STATES,
+       DCCP_OPEN            = TCP_ESTABLISHED,
+       DCCP_REQUESTING      = TCP_SYN_SENT,
+       DCCP_LISTEN          = TCP_LISTEN,
+       DCCP_RESPOND         = TCP_SYN_RECV,
+       /*
+        * States involved in closing a DCCP connection:
+        * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
+        *
+        * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
+        *  a. Client has performed active-close, has sent a Close to the server
+        *     from state OPEN or PARTOPEN, and is waiting for the final Reset
+        *     (in this case, SOCK_DONE == 1).
+        *  b. Client is asked to perform passive-close, by receiving a CloseReq
+        *     in (PART)OPEN state. It sends a Close and waits for final Reset
+        *     (in this case, SOCK_DONE == 0).
+        *  c. Server performs an active-close as in (a), keeps TIMEWAIT state.
+        *
+        * 3) The following intermediate states are employed to give passively
+        *    closing nodes a chance to process their unread data:
+        *    - PASSIVE_CLOSE    (from OPEN => CLOSED) and
+        *    - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
+        */
+       DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
+       DCCP_PASSIVE_CLOSE   = TCP_CLOSE_WAIT,  /* any node receiving a Close */
+       DCCP_CLOSING         = TCP_CLOSING,
+       DCCP_TIME_WAIT       = TCP_TIME_WAIT,
+       DCCP_CLOSED          = TCP_CLOSE,
+       DCCP_PARTOPEN        = TCP_MAX_STATES,
+       DCCP_PASSIVE_CLOSEREQ,                  /* clients receiving CloseReq */
+       DCCP_MAX_STATES
 };
 
-#define DCCP_STATE_MASK 0xf
-#define DCCP_ACTION_FIN (1<<7)
+#define DCCP_STATE_MASK 0x1f
 
 enum {
-       DCCPF_OPEN       = TCPF_ESTABLISHED,
-       DCCPF_REQUESTING = TCPF_SYN_SENT,
-       DCCPF_PARTOPEN   = TCPF_FIN_WAIT1,
-       DCCPF_LISTEN     = TCPF_LISTEN,
-       DCCPF_RESPOND    = TCPF_SYN_RECV,
-       DCCPF_CLOSING    = TCPF_CLOSING,
-       DCCPF_TIME_WAIT  = TCPF_TIME_WAIT,
-       DCCPF_CLOSED     = TCPF_CLOSE,
+       DCCPF_OPEN            = TCPF_ESTABLISHED,
+       DCCPF_REQUESTING      = TCPF_SYN_SENT,
+       DCCPF_LISTEN          = TCPF_LISTEN,
+       DCCPF_RESPOND         = TCPF_SYN_RECV,
+       DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
+       DCCPF_CLOSING         = TCPF_CLOSING,
+       DCCPF_TIME_WAIT       = TCPF_TIME_WAIT,
+       DCCPF_CLOSED          = TCPF_CLOSE,
+       DCCPF_PARTOPEN        = (1 << DCCP_PARTOPEN),
 };
 
 static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
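
Since every DCCPF_* value above is a single bit (1 << state), membership in
several states collapses to one mask test. A hedged usage sketch; the helper
name is made up for illustration and does not appear in this patch:

#include <linux/dccp.h>
#include <net/sock.h>

/* Hypothetical helper: true while the socket may still carry payload,
 * i.e. in OPEN or PARTOPEN, using the standard bit-flag idiom. */
static inline bool example_dccp_data_state(const struct sock *sk)
{
	return (1 << sk->sk_state) & (DCCPF_OPEN | DCCPF_PARTOPEN);
}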
@@ -270,10 +285,9 @@ static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
        return memset(skb_transport_header(skb), 0, headlen);
 }
 
-static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
+static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
 {
-       return (struct dccp_hdr_ext *)(skb_transport_header(skb) +
-                                      sizeof(struct dccp_hdr));
+       return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
 }
 
 static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
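
Taking the header pointer instead of the skb means dccp_hdrx() no longer
assumes the header sits at skb->transport_header, so it also works on headers
parsed out of arbitrary buffers. A small usage sketch, assuming <linux/dccp.h>
is included; the wrapper name is illustrative only:

#include <linux/dccp.h>

/* Illustrative wrapper: fetch the low 32 sequence bits when the packet
 * carries 48-bit sequence numbers (dccph_x set), 0 otherwise. */
static inline __u32 example_seq_low(const struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	return dh->dccph_x ? ntohl(dccp_hdrx(dh)->dccph_seq_low) : 0;
}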
@@ -287,13 +301,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
        return __dccp_basic_hdr_len(dh);
 }
 
-static inline __u64 dccp_hdr_seq(const struct sk_buff *skb)
+static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
 {
-       const struct dccp_hdr *dh = dccp_hdr(skb);
        __u64 seq_nr =  ntohs(dh->dccph_seq);
 
        if (dh->dccph_x != 0)
-               seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low);
+               seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low);
        else
                seq_nr += (u32)dh->dccph_seq2 << 16;
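
With dccp_hdr_seq() now taking the header rather than the skb, callers that
previously passed an skb are expected to pass dccp_hdr(skb) instead. A
caller-side sketch of the adjusted pattern (illustrative, not taken from the
patch):

#include <linux/dccp.h>

/* Hypothetical caller: reconstruct the full sequence number of a
 * received packet via the new header-based accessor. */
static inline __u64 example_rx_seqno(const struct sk_buff *skb)
{
	return dccp_hdr_seq(dccp_hdr(skb));
}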