[IPSEC]: Move flow construction into xfrm_dst_lookup
diff --git a/include/net/sock.h b/include/net/sock.h
index 67e35c7e230c42a08bb718e6508d3826dbbe6cdd..f5b6437141315cd745cbe8ddabdc1b64390936cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -145,7 +145,8 @@ struct sock_common {
   *    @sk_forward_alloc: space allocated forward
   *    @sk_allocation: allocation mode
   *    @sk_sndbuf: size of send buffer in bytes
-  *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
+  *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+  *               %SO_OOBINLINE settings
   *    @sk_no_check: %SO_NO_CHECK setting, whether or not checksum packets
   *    @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
   *    @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
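
A side note on the @sk_flags documentation above: the flag bits are not read directly but through the sock_flag()/sock_set_flag() helpers defined later in this header. A minimal sketch, assuming a caller that wants to refuse broadcast sends (the may_broadcast() helper below is hypothetical, not part of this patch):

#include <linux/errno.h>
#include <net/sock.h>

/* Hypothetical helper: mirrors how datagram code typically refuses a
 * broadcast destination unless SO_BROADCAST has been set on the socket. */
static int may_broadcast(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_BROADCAST))	/* tests the SOCK_BROADCAST bit in sk_flags */
		return -EACCES;
	return 0;
}
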
@@ -153,9 +154,12 @@ struct sock_common {
   *    @sk_backlog: always used with the per-socket spinlock held
   *    @sk_callback_lock: used with the callbacks in the end of this struct
   *    @sk_error_queue: rarely used
-  *    @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
+  *    @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
+  *                      IPV6_ADDRFORM for instance)
   *    @sk_err: last error
-  *    @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
+  *    @sk_err_soft: errors that don't cause failure but are the cause of a
+  *                  persistent failure not just 'timed out'
+  *    @sk_drops: raw drops counter
   *    @sk_ack_backlog: current listen backlog
   *    @sk_max_ack_backlog: listen backlog set in listen()
   *    @sk_priority: %SO_PRIORITY setting
@@ -239,6 +243,7 @@ struct sock {
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
+       atomic_t                sk_drops;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
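
The hunk above adds the sk_drops field documented earlier as a raw drops counter. A minimal sketch of how such an atomic counter is typically used (the sk_drop_and_free() helper and its call site are illustrative, not taken from this patch):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative only: account a discarded packet on the socket's
 * drop counter, then free the buffer. */
static inline void sk_drop_and_free(struct sock *sk, struct sk_buff *skb)
{
	atomic_inc(&sk->sk_drops);	/* lock-free per-socket accounting */
	kfree_skb(skb);
}

A statistics dump would read the counter back with atomic_read(&sk->sk_drops).
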
@@ -944,7 +949,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
                return err;
        
        rcu_read_lock_bh();
-       filter = sk->sk_filter;
+       filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns,
                                filter->len);
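
The last hunk replaces a plain load of sk->sk_filter with rcu_dereference(), which pairs with the rcu_read_lock_bh()/rcu_read_unlock_bh() critical section already surrounding it. A minimal sketch of that read-side pattern, assuming a hypothetical helper that only peeks at the attached filter's length:

#include <linux/rcupdate.h>
#include <net/sock.h>

/* Hypothetical reader: the filter pointer may be replaced or cleared at
 * any time, so it must only be dereferenced inside the RCU section. */
static unsigned int attached_filter_len(struct sock *sk)
{
	struct sk_filter *f;
	unsigned int len = 0;

	rcu_read_lock_bh();
	f = rcu_dereference(sk->sk_filter);	/* ordered load of the RCU-protected pointer */
	if (f)
		len = f->len;			/* f stays valid only until the unlock below */
	rcu_read_unlock_bh();

	return len;
}

The update side would publish a replacement with rcu_assign_pointer() and defer freeing the old filter until a grace period has elapsed.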