diff --git a/net/core/sock.c b/net/core/sock.c
index a96ea7dd0fc1b4f6bf2bd092b44218c92c384244..5d820c3766530a991056050d23be7013107ae20c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -385,7 +385,21 @@ set_sndbuf:
                                val = sysctl_rmem_max;
 set_rcvbuf:
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                       /* FIXME: is this lower bound the right one? */
+                       /*
+                        * We double it on the way in to account for
+                        * "struct sk_buff" etc. overhead.   Applications
+                        * assume that the SO_RCVBUF setting they make will
+                        * allow that much actual data to be received on that
+                        * socket.
+                        *
+                        * Applications are unaware that "struct sk_buff" and
+                        * other overheads allocate from the receive buffer
+                        * during socket buffer allocation.
+                        *
+                        * And after considering the possible alternatives,
+                        * returning the value we actually used in getsockopt
+                        * is the most desirable behavior.
+                        */
                        if ((val * 2) < SOCK_MIN_RCVBUF)
                                sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
                        else
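
For illustration only (not part of the patch), here is a minimal userspace sketch of the behaviour the new comment documents: the value passed to setsockopt(SO_RCVBUF) is doubled by the kernel, and getsockopt(SO_RCVBUF) reports the value actually used. The requested size of 65536 is an arbitrary example; the result is also clamped against sysctl_rmem_max and SOCK_MIN_RCVBUF.

/*
 * Sketch: SO_RCVBUF is doubled on the way in, and getsockopt()
 * reports the value the kernel actually used.
 */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int requested = 65536;		/* arbitrary example size */
	int effective = 0;
	socklen_t len = sizeof(effective);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &requested, sizeof(requested));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &effective, &len);

	/* Typically prints 131072: twice the requested size, after clamping
	 * against sysctl_rmem_max and SOCK_MIN_RCVBUF. */
	printf("requested %d, kernel uses %d\n", requested, effective);
	return 0;
}
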
@@ -818,6 +832,9 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);
+#ifdef CONFIG_NET_DMA
+               skb_queue_head_init(&newsk->sk_async_wait_queue);
+#endif
 
                rwlock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
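
For context (not part of the patch): sk_clone() starts from a copy of the parent socket, so every skb queue head must be re-initialised or the clone would share list pointers and locks with its parent. The new CONFIG_NET_DMA queue follows the same pattern as the existing receive and write queues; skb_queue_head_init() in kernels of this generation looks roughly like the sketch below (reproduced here for reference, based on include/linux/skbuff.h of that era).

/*
 * Reference sketch of skb_queue_head_init(): resets a queue to an empty,
 * self-linked state with a fresh lock, so the cloned socket shares no
 * queue state with its parent.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
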
@@ -1369,6 +1386,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);
+#ifdef CONFIG_NET_DMA
+       skb_queue_head_init(&sk->sk_async_wait_queue);
+#endif
 
        sk->sk_send_head        =       NULL;