X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fcore%2Fsock.c;h=91f8bbc93526523a2eaca207359951117712c577;hb=ad619800e4e034cad44299b2a22df9eebb043ac3;hp=174c64bc7a431b9f5905eab4ee70fe9e21bc4b6c;hpb=1338d466d9c3f8a65cc6d83c629cd906f2a989f8;p=linux-2.6

diff --git a/net/core/sock.c b/net/core/sock.c
index 174c64bc7a..91f8bbc935 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,8 +7,6 @@
  *		handler for protocols to use and generic option handler.
  *
  *
- * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
- *
  * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Florian La Roche, <flla@stud.uni-sb.de>
@@ -182,7 +180,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_ASH"   , "clock-AF_ECONET"  , "clock-AF_ATMSVC"  ,
   "clock-21"       , "clock-AF_SNA"     , "clock-AF_IRDA"    ,
   "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC"     ,
-  "clock-27"       , "clock-28"         , "clock-29"         ,
+  "clock-27"       , "clock-28"         , "clock-AF_CAN"     ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"   ,
   "clock-AF_RXRPC" , "clock-AF_MAX"
 };
@@ -228,11 +226,12 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
                 static int warned __read_mostly;
 
                 *timeo_p = 0;
-                if (warned < 10 && net_ratelimit())
+                if (warned < 10 && net_ratelimit()) {
                         warned++;
                         printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
                                "tries to set negative timeout\n",
                                 current->comm, task_pid_nr(current));
+                }
                 return 0;
         }
         *timeo_p = MAX_SCHEDULE_TIMEOUT;
@@ -269,7 +268,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
         int err = 0;
         int skb_len;
 
-        /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+        /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
            number of warnings when compiling with -W --ANK
          */
         if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
@@ -450,15 +449,6 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
          *      Options without arguments
          */
 
-#ifdef SO_DONTLINGER            /* Compatibility item... */
-        if (optname == SO_DONTLINGER) {
-                lock_sock(sk);
-                sock_reset_flag(sk, SOCK_LINGER);
-                release_sock(sk);
-                return 0;
-        }
-#endif
-
         if (optname == SO_BINDTODEVICE)
                 return sock_bindtodevice(sk, optval, optlen);
 
@@ -942,7 +932,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
  *      @family: protocol family
  *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
  *      @prot: struct proto associated with this new sock instance
- *      @zero_it: if we should zero the newly allocated sock
  */
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                       struct proto *prot)
@@ -1001,6 +990,7 @@ void sk_release_kernel(struct sock *sk)
 
         sock_hold(sk);
         sock_release(sk->sk_socket);
+        release_net(sock_net(sk));
         sock_net_set(sk, get_net(&init_net));
         sock_put(sk);
 }
@@ -1076,7 +1066,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                  * to be taken into account in all callers. -acme
                  */
                 sk_refcnt_debug_inc(newsk);
-                newsk->sk_socket = NULL;
+                sk_set_socket(newsk, NULL);
                 newsk->sk_sleep  = NULL;
 
                 if (newsk->sk_prot->sockets_allocated)
@@ -1452,7 +1442,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
         /* Under pressure. */
         if (allocated > prot->sysctl_mem[1])
                 if (prot->enter_memory_pressure)
-                        prot->enter_memory_pressure();
+                        prot->enter_memory_pressure(sk);
 
         /* Over hard limit. */
         if (allocated > prot->sysctl_mem[2])
@@ -1642,7 +1632,7 @@ static void sock_def_readable(struct sock *sk, int len)
 {
         read_lock(&sk->sk_callback_lock);
         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                wake_up_interruptible(sk->sk_sleep);
+                wake_up_interruptible_sync(sk->sk_sleep);
         sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
         read_unlock(&sk->sk_callback_lock);
 }
@@ -1656,7 +1646,7 @@ static void sock_def_write_space(struct sock *sk)
          */
         if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                        wake_up_interruptible(sk->sk_sleep);
+                        wake_up_interruptible_sync(sk->sk_sleep);
 
                 /* Should agree with poll, otherwise some programs break */
                 if (sock_writeable(sk))
@@ -1712,7 +1702,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         sk->sk_rcvbuf = sysctl_rmem_default;
         sk->sk_sndbuf = sysctl_wmem_default;
         sk->sk_state = TCP_CLOSE;
-        sk->sk_socket = sock;
+        sk_set_socket(sk, sock);
 
         sock_set_flag(sk, SOCK_ZAPPED);
 
@@ -1746,7 +1736,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
 
-        sk->sk_stamp = ktime_set(-1L, -1L);
+        sk->sk_stamp = ktime_set(-1L, 0);
 
         atomic_set(&sk->sk_refcnt, 1);
         atomic_set(&sk->sk_drops, 0);
@@ -1947,15 +1937,62 @@ struct prot_inuse {
 };
 
 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
+
+#ifdef CONFIG_NET_NS
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+        int cpu = smp_processor_id();
+        per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+        int cpu, idx = prot->inuse_idx;
+        int res = 0;
+
+        for_each_possible_cpu(cpu)
+                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+
+        return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+
+static int sock_inuse_init_net(struct net *net)
+{
+        net->core.inuse = alloc_percpu(struct prot_inuse);
+        return net->core.inuse ? 0 : -ENOMEM;
+}
+
+static void sock_inuse_exit_net(struct net *net)
+{
+        free_percpu(net->core.inuse);
+}
+
+static struct pernet_operations net_inuse_ops = {
+        .init = sock_inuse_init_net,
+        .exit = sock_inuse_exit_net,
+};
+
+static __init int net_inuse_init(void)
+{
+        if (register_pernet_subsys(&net_inuse_ops))
+                panic("Cannot initialize net inuse counters");
+
+        return 0;
+}
+
+core_initcall(net_inuse_init);
+#else
 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
 
-void sock_prot_inuse_add(struct proto *prot, int val)
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
 {
         __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
 
-int sock_prot_inuse_get(struct proto *prot)
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
 {
         int cpu, idx = prot->inuse_idx;
         int res = 0;
@@ -1966,6 +2003,7 @@ int sock_prot_inuse_get(struct proto *prot)
         return res >= 0 ? res : 0;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+#endif
 
 static void assign_proto_idx(struct proto *prot)
 {
@@ -1999,11 +2037,6 @@ int proto_register(struct proto *prot, int alloc_slab)
         char *request_sock_slab_name = NULL;
         char *timewait_sock_slab_name;
 
-        if (sock_prot_inuse_init(prot) != 0) {
-                printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
-                goto out;
-        }
-
         if (alloc_slab) {
                 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                                SLAB_HWCACHE_ALIGN, NULL);
@@ -2011,7 +2044,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                 if (prot->slab == NULL) {
                         printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
                                prot->name);
-                        goto out_free_inuse;
+                        goto out;
                 }
 
                 if (prot->rsk_prot != NULL) {
@@ -2070,8 +2103,6 @@ out_free_request_sock_slab_name:
 out_free_sock_slab:
         kmem_cache_destroy(prot->slab);
         prot->slab = NULL;
-out_free_inuse:
-        sock_prot_inuse_free(prot);
 out:
         return -ENOBUFS;
 }
@@ -2085,8 +2116,6 @@ void proto_unregister(struct proto *prot)
         list_del(&prot->node);
         write_unlock(&proto_list_lock);
 
-        sock_prot_inuse_free(prot);
-
         if (prot->slab != NULL) {
                 kmem_cache_destroy(prot->slab);
                 prot->slab = NULL;
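
Note on the per-namespace "inuse" counters introduced above: with CONFIG_NET_NS, each struct net owns its own per-CPU array of counters (allocated by the pernet init hook), sock_prot_inuse_add() bumps only the local CPU's slot, and sock_prot_inuse_get() sums over all possible CPUs, clamping a transiently negative total to zero because a socket may be counted up on one CPU and down on another. The standalone C sketch below models that pattern in userspace; the fixed NR_CPUS, the helper names, and main() are illustrative assumptions, not kernel API:

/* model_inuse.c - userspace model of the per-CPU "inuse" counter pattern.
 * Writers touch only their own CPU's slot (no cross-CPU write sharing);
 * the reader sums every slot and clamps transient negatives to zero,
 * mirroring the sock_prot_inuse_get() logic in the diff above. */
#include <stdio.h>

#define NR_CPUS         4       /* illustrative; the kernel walks all possible CPUs */
#define PROTO_INUSE_NR  64      /* matches the bitmap size used above */

struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

/* One counter block per CPU, as alloc_percpu() provides per namespace. */
static struct prot_inuse inuse[NR_CPUS];

static void prot_inuse_add(int cpu, int idx, int val)
{
        inuse[cpu].val[idx] += val;     /* writer touches its own CPU only */
}

static int prot_inuse_get(int idx)
{
        int cpu, res = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                res += inuse[cpu].val[idx];

        return res >= 0 ? res : 0;      /* add and remove may hit different CPUs */
}

int main(void)
{
        prot_inuse_add(0, 7, 1);        /* socket created on CPU 0 */
        prot_inuse_add(2, 7, 1);        /* socket created on CPU 2 */
        prot_inuse_add(1, 7, -1);       /* one destroyed on CPU 1 */

        printf("protocol 7 sockets in use: %d\n", prot_inuse_get(7)); /* prints 1 */
        return 0;
}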