/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	unsigned int		dst_groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
};
static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}
struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};
struct netlink_table {
	struct nl_pid_hash	hash;
	struct hlist_head	mc_list;
	unsigned int		nl_nonroot;
	struct proto_ops	*p_ops;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
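
/* Look up the socket bound to (protocol, pid). On success the socket is
 * returned with its refcount raised via sock_hold(); the caller must drop
 * it with sock_put() when done.
 */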
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}
static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
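
/* Resize or reseed the pid hash. With grow set, the table doubles until
 * max_shift is reached; otherwise it is rebuilt at the same size with a
 * fresh random seed to break up unlucky chains. Returns nonzero when a
 * new table was installed.
 */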
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static struct proto_ops netlink_ops;
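
/* Bind sk to pid in the per-protocol hash. Returns -EADDRINUSE if another
 * socket owns the pid already and -EBUSY if sk itself is already bound.
 */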
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_table_grab();
	if (!nl_table[protocol].hash.entries) {
#ifdef CONFIG_KMOD
		/* We do 'best effort'. If we find a matching module,
		 * it is loaded. If not, we don't return an error to
		 * allow pure userspace<->userspace communication. -HW
		 */
		netlink_table_ungrab();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_table_grab();
#endif
	}
	netlink_table_ungrab();

	sock->ops = nl_table[protocol].p_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	/* When this is a kernel socket, we need to remove the owner pointer,
	 * since we don't know whether the module will be dying at any given
	 * point - HW
	 */
	if (!nlk->pid) {
		struct proto_ops *p_tmp;

		netlink_table_grab();
		p_tmp = nl_table[sk->sk_protocol].p_ops;
		if (p_tmp != &netlink_ops) {
			nl_table[sk->sk_protocol].p_ops = &netlink_ops;
			kfree(p_tmp);
		}
		netlink_table_ungrab();
	}

	sock_put(sk);
	return 0;
}
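
/* Pick a pid for an unbound socket: try current->pid first and fall back
 * to scanning negative values on collision. If two threads of one process
 * race here, losing the race is harmless (-EBUSY is treated as success).
 */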
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 *
 * return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_sock *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
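
/* Shrink an skb whose unused tailroom dominates its truesize before it is
 * queued, so receivers are not charged for memory the message never uses.
 */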
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   unsigned int __nocast allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	unsigned int allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
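
/* Deliver skb once to every socket on the protocol's mc_list that is a
 * member of group, skipping ssk and the sending pid. Returns 0 if at least
 * one socket accepted the message, -ENOBUFS if a clone failure forced an
 * overrun, and -ESRCH if there were no listeners.
 */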
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};
static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
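
/* sendmsg: the destination (pid, groups) comes from msg_name when one is
 * supplied (multicast sends require NL_NONROOT_SEND or CAP_NET_ADMIN) and
 * from the connected state otherwise. Unbound sockets are autobound before
 * the message is queued.
 */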
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	   = nlk->pid;
	NETLINK_CB(skb).dst_pid    = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	NETLINK_CB(skb).loginuid   = audit_get_loginuid(current->audit_context);
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	 * current capabilities and check them when this message is delivered
	 * to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad	= 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */
struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
{
	struct proto_ops *p_ops;
	struct socket *sock;
	struct sock *sk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	/* Do a quick check, to make us not go down to netlink_insert()
	 * if protocol already has kernel socket.
	 */
	sk = netlink_lookup(unit, 0);
	if (sk) {
		sock_put(sk);
		return NULL;
	}

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/* Every registering protocol implemented in a module needs
	 * its own p_ops, since the socket code cannot deal with
	 * module refcounting otherwise. -HW
	 */
	if (module) {
		p_ops = kmalloc(sizeof(*p_ops), GFP_KERNEL);
		if (!p_ops)
			goto out_sock_release;

		memcpy(p_ops, &netlink_ops, sizeof(*p_ops));
		p_ops->owner = module;
	} else
		p_ops = &netlink_ops;

	netlink_table_grab();
	nl_table[unit].p_ops = p_ops;
	netlink_table_ungrab();

	if (netlink_create(sock, unit) < 0)
		goto out_kfree_p_ops;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_kfree_p_ops;

	return sk;

out_kfree_p_ops:
	netlink_table_grab();
	if (nl_table[unit].p_ops != &netlink_ops) {
		kfree(nl_table[unit].p_ops);
		nl_table[unit].p_ops = &netlink_ops;
	}
	netlink_table_ungrab();
out_sock_release:
	sock_release(sock);
	return NULL;
}
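
/*
 * Typical usage (a sketch only; NETLINK_TEST, test_input and nl_sk are
 * hypothetical names, not part of this file): a protocol creates its
 * kernel socket once at init time and drains requests in the callback:
 *
 *	static void test_input(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
 *			// handle nlh, reply via netlink_unicast()/netlink_ack()
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	nl_sk = netlink_kernel_create(NETLINK_TEST, test_input, THIS_MODULE);
 */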
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	return 0;

nlmsg_failure:
	return -ENOBUFS;
}
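
/* Install a dump callback on the socket that sent nlh and emit the first
 * chunk. dump() is re-invoked from netlink_recvmsg() as the reader drains
 * its receive queue; when dump() returns no more data, an NLMSG_DONE
 * message is queued and done() is called.
 */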
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
		       int (*done)(struct netlink_callback*))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	sock_put(sk);
	return 0;
}
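
/* Queue an NLMSG_ERROR reply to nlh back to its sender. For err == 0 this
 * is a plain ack carrying only the original header; for nonzero err the
 * whole offending message is echoed after the error code.
 */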
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}
static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
extern void netlink_skb_parms_too_large(void);
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
enomem:
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
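
	/* Size the pid hash limit from available memory: allow roughly one
	 * bucket per 2MB of physical memory on larger machines (>= 512MB
	 * with 4k pages) and one per 8MB on smaller ones, rounded down to a
	 * power-of-two number of hlist_heads.
	 */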
	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		nl_table[i].p_ops = &netlink_ops;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto enomem;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}
	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
}

core_initcall(netlink_proto_init);
EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);