/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	unsigned int		dst_groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
	struct module		*module;
	u32			flags;
};

#define NETLINK_KERNEL_SOCKET	0x1
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}
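/*
 * Illustrative note (not in the original): because 'sk' is the first
 * member of struct netlink_sock, the cast above is equivalent to
 * using container_of():
 *
 *	struct netlink_sock *nlk =
 *		container_of(sk, struct netlink_sock, sk);
 *
 * Both forms yield the same pointer; the direct cast merely relies on
 * C's guarantee that a pointer to a struct also points to its first
 * member.
 */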
struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};
struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned int nl_nonroot;
	struct module *module;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the cpus. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
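/*
 * Illustrative sketch (not in the original): a reader that may sleep
 * pins the tables with netlink_lock_table()/netlink_unlock_table(),
 * while a writer takes netlink_table_grab(), which waits for all such
 * readers to drain before mutating the hash:
 *
 *	netlink_lock_table();
 *	... sleepable read-side work on nl_table ...
 *	netlink_unlock_table();
 *
 *	netlink_table_grab();
 *	... insert/remove sockets ...
 *	netlink_table_ungrab();
 */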
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}
static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
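/*
 * Worked example (not in the original): with shift = 4 (16 buckets)
 * and entries = 48, avg = 48 >> 4 = 3 > 1, so an insert that had to
 * walk a chain triggers a grow-rehash.  If instead avg were 0 or 1
 * but the colliding chain was longer than avg (len > avg), the keys
 * are clustering in a few buckets, so the table is re-seeded
 * (grow == 0) to pick a new hash->rnd, at most once per rehash_time
 * interval.
 */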
static struct proto_ops netlink_ops;
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;
	struct module *module;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
	if (!nl_table[protocol].hash.entries) {
		/* We do 'best effort'.  If we find a matching module,
		 * it is loaded.  If not, we don't return an error to
		 * allow pure userspace<->userspace communication. -HW
		 */
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
	module = nl_table[protocol].module;
	if (!try_module_get(module))
		module = NULL;
	netlink_unlock_table();

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk) {
		module_put(module);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);

	nlk->module = module;
	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}
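/*
 * Illustrative note (not in the original): the request_module() call
 * above resolves through a module alias of the form
 * "net-pf-<family>-proto-<protocol>".  A hypothetical protocol module
 * for PF_NETLINK (16), protocol 4 would therefore advertise itself
 * with:
 *
 *	MODULE_ALIAS("net-pf-16-proto-4");
 */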
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		netlink_table_grab();
		nl_table[sk->sk_protocol].module = NULL;
		netlink_table_ungrab();
	}

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	return 0;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 *   0: continue - skb attached, caller should deliver it.
 *   1: repeat lookup - reference dropped while waiting for socket memory.
 */
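/*
 * Illustrative caller pattern (not in the original): because a return
 * value of 1 means the socket reference was dropped while sleeping,
 * the lookup must be redone, as netlink_unicast() does below:
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		...
 *		err = netlink_attachskb(sk, skb, nonblock, timeo);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 */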
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_sock *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   unsigned int __nocast allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	unsigned int allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
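/*
 * Illustrative usage (not in the original): an rtnetlink-style caller
 * multicasts a message to a group, treating -ESRCH (no listeners) as
 * success:
 *
 *	err = netlink_broadcast(rtnl, skb, 0, RTMGRP_LINK, GFP_KERNEL);
 *	if (err == -ESRCH)
 *		err = 0;
 */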
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   current capabilities and check them when this message is
	   delivered to the corresponding kernel module.  --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */
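/*
 * Illustrative usage (not in the original): a subsystem creates its
 * in-kernel netlink socket once at init time and receives packets via
 * the 'input' callback; rtnetlink, for example, does roughly:
 *
 *	rtnl = netlink_kernel_create(NETLINK_ROUTE, rtnetlink_rcv,
 *				     THIS_MODULE);
 *	if (rtnl == NULL)
 *		panic("rtnetlink_init: cannot initialize rtnetlink\n");
 */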
struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].module = module;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	sock_release(sock);
	return NULL;
}
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	return 0;

nlmsg_failure:
	return -ENOBUFS;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	sock_put(sk);
	return 0;
}
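/*
 * Illustrative usage (not in the original): a message handler that
 * recognizes NLM_F_DUMP hands the request over to netlink_dump_start(),
 * roughly as rtnetlink does for RTM_GETLINK:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(rtnl, skb, nlh,
 *					  rtnetlink_dump_ifinfo, NULL);
 */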
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
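/*
 * Illustrative usage (not in the original): a request handler acks a
 * message that asked for confirmation, or reports a failure, e.g.:
 *
 *	if ((nlh->nlmsg_flags & NLM_F_ACK) || err)
 *		netlink_ack(skb, nlh, err);
 */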
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}
static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
extern void netlink_skb_parms_too_large(void);
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			nl_table = NULL;
			return -ENOMEM;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
}
core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);