/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
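/*
 * Worked example of the macro above: NLGRPSZ() returns the number of bytes
 * needed for a bitmap with one bit per multicast group, rounded up to whole
 * unsigned longs. For 32 groups on a 64-bit machine, ALIGN(32, 64) == 64
 * bits, so NLGRPSZ(32) == 64 / 8 == 8 bytes (one long); on a 32-bit machine
 * it is ALIGN(32, 32) / 8 == 4 bytes.
 */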
struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock sk;
	u32 pid;
	u32 dst_pid;
	u32 dst_group;
	u32 flags;
	u32 subscriptions;
	u32 ngroups;
	unsigned long *groups;
	unsigned long state;
	wait_queue_head_t wait;
	struct netlink_callback *cb;
	struct mutex *cb_mutex;
	struct mutex cb_def_mutex;
	void (*data_ready)(struct sock *sk, int bytes);
	struct module *module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
	BUG_TRAP(!nlk_sk(sk)->groups);
}

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
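/*
 * Illustrative sketch (not part of this file's logic) of how the reader and
 * writer sides of the table lock pair up: netlink_lock_table() bumps
 * nl_table_users under the rwlock and may then sleep, while
 * netlink_table_grab() waits for the count to drain. The function name below
 * is hypothetical.
 */
#if 0
static struct sock *example_lookup_while_sleeping_ok(int protocol, u32 pid)
{
	struct sock *sk;

	netlink_lock_table();	/* mark ourselves as a table user */
	sk = netlink_lookup(protocol, pid);
	/* sleeping allocations are safe here; writers wait for us */
	netlink_unlock_table();	/* last user out wakes netlink_table_grab() */
	return sk;
}
#endif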
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPSZ(tbl->groups)/sizeof(unsigned long); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list)
			mask |= nlk_sk(sk)->groups[i];
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name = "NETLINK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct socket *sock, struct mutex *cb_mutex,
			    int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_KMOD
	/* Try to autoload the module implementing this protocol. */
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	mutex_unlock(nlk->cb_mutex);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (nlk->flags & NETLINK_KERNEL_SOCKET) {
		kfree(nl_table[sk->sk_protocol].listeners);
		nl_table[sk->sk_protocol].module = NULL;
		nl_table[sk->sk_protocol].registered = 0;
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	return 0;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}
static int netlink_alloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	int err = 0;

	netlink_lock_table();
	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered)
		err = -ENOENT;
	netlink_unlock_table();

	if (err)
		return err;

	nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (nlk->groups == NULL)
		return -ENOMEM;
	nlk->ngroups = groups;
	return 0;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		if (nlk->groups == NULL) {
			err = netlink_alloc_groups(sk);
			if (err)
				return err;
		}
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
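/*
 * Userspace view of the bind path above (illustrative sketch, not kernel
 * code): nl_pid 0 triggers netlink_autobind(), and each bit set in
 * nl_groups subscribes one multicast group, subject to NL_NONROOT_RECV.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int example_open_route_socket(void)
{
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_pid = 0;		/* let the kernel autobind a pid */
	snl.nl_groups = 1;	/* bit 1 == rtnetlink link notifications */
	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return -1;
	return fd;
}
#endif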
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;

	BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET));
	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, nl_table[sk->sk_protocol].listeners);
	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
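/*
 * Minimal kernel-side sketch of the broadcast path above (illustrative
 * only; EXAMPLE_GRP and the flat payload are hypothetical).
 * netlink_broadcast() consumes the skb and returns -ESRCH when nobody
 * listens, which callers can avoid cheaply with netlink_has_listeners().
 */
#if 0
static void example_notify(struct sock *nl_sk, void *payload, int len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const unsigned int EXAMPLE_GRP = 1;	/* hypothetical group number */

	if (!netlink_has_listeners(nl_sk, EXAMPLE_GRP))
		return;
	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return;
	nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
	if (!nlh) {
		kfree_skb(skb);
		return;
	}
	memcpy(nlmsg_data(nlh), payload, len);
	netlink_broadcast(nl_sk, skb, 0, EXAMPLE_GRP, GFP_KERNEL);
}
#endif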
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int val = 0, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		unsigned int subscriptions;
		int old, new = optname == NETLINK_ADD_MEMBERSHIP ? 1 : 0;

		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		if (nlk->groups == NULL) {
			err = netlink_alloc_groups(sk);
			if (err)
				return err;
		}
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		old = test_bit(val - 1, nlk->groups);
		subscriptions = nlk->subscriptions - old + new;
		if (new)
			__set_bit(val - 1, nlk->groups);
		else
			__clear_bit(val - 1, nlk->groups);
		netlink_update_subscriptions(sk, subscriptions);
		netlink_update_listeners(sk);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
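/*
 * Userspace counterpart of NETLINK_ADD_MEMBERSHIP above (illustrative
 * sketch): unlike the 32-bit nl_groups bitmask passed to bind(), the group
 * is passed here by number, so groups above 32 become reachable.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>

static int example_join_group(int fd, unsigned int group)
{
	return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			  &group, sizeof(group));
}
#endif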
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, (int __user *)optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	 * current capabilities and check them when this message is delivered
	 * to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);

	/* Report the full message size before freeing the skb; reading
	 * skb->len after skb_free_datagram() would be a use after free. */
	if (flags & MSG_TRUNC)
		copied = skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, unsigned int groups,
		      void (*input)(struct sock *sk, int len),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(sock, cb_mutex, unit) < 0)
		goto out_sock_release;

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	nl_table[unit].groups = groups;
	nl_table[unit].listeners = listeners;
	nl_table[unit].cb_mutex = cb_mutex;
	nl_table[unit].module = module;
	nl_table[unit].registered = 1;
	netlink_table_ungrab();

	return sk;

out_sock_release:
	kfree(listeners);
	sock_release(sock);
	return NULL;
}
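/*
 * Typical caller of netlink_kernel_create() (illustrative sketch; the unit
 * NETLINK_USERSOCK and the example_* names are stand-ins). The input
 * callback runs from sk_data_ready context and usually just drains the
 * receive queue.
 */
#if 0
static struct sock *example_nl_sk;

static void example_input(struct sock *sk, int len)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* parse nlmsg_hdr(skb) here */
		kfree_skb(skb);
	}
}

static int __init example_init(void)
{
	example_nl_sk = netlink_kernel_create(NETLINK_USERSOCK, 0,
					      example_input, NULL, THIS_MODULE);
	return example_nl_sk ? 0 : -ENOMEM;
}
#endif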
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump or destruction is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb || sock_flag(sk, SOCK_DEAD)) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal the queue management to interrupt processing of
	 * any netlink messages so userspace gets a chance to read
	 * the results. */
	return -EINTR;
}
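/*
 * How a request handler typically hands off to the dump machinery above
 * (illustrative sketch; example_dump/example_done are hypothetical). The
 * -EINTR from netlink_dump_start() is propagated so netlink_run_queue()
 * stops and lets userspace read the first results. skb->sk is the kernel
 * socket the request arrived on.
 */
#if 0
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb);
static int example_done(struct netlink_callback *cb);

static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(skb->sk, skb, nlh,
					  example_dump, example_done);
	return -EOPNOTSUPP;
}
#endif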
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}

static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
							  struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto skip;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto skip;

		err = cb(skb, nlh);
		if (err == -EINTR) {
			/* Not an error, but we interrupt processing */
			netlink_queue_skip(nlh, skb);
			return err;
		}
skip:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

		netlink_queue_skip(nlh, skb);
	}

	return 0;
}
/**
 * netlink_run_queue - Process netlink receive queue.
 * @sk: Netlink socket containing the queue
 * @qlen: Place to store queue length upon entry
 * @cb: Callback function invoked for each netlink message found
 *
 * Processes as much as there was in the queue upon entry and invokes
 * a callback function for each netlink message found. The callback
 * function may refuse a message by returning a negative error code
 * but setting the error pointer to 0 in which case this function
 * returns with a qlen != 0.
 *
 * qlen must be initialized to 0 before the initial entry, afterwards
 * the function may be called repeatedly until qlen reaches 0.
 *
 * The callback function may return -EINTR to signal that processing
 * of netlink messages shall be interrupted. In this case the message
 * currently being processed will NOT be requeued onto the receive
 * queue.
 */
void netlink_run_queue(struct sock *sk, unsigned int *qlen,
		       int (*cb)(struct sk_buff *, struct nlmsghdr *))
{
	struct sk_buff *skb;

	if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
		*qlen = skb_queue_len(&sk->sk_receive_queue);

	for (; *qlen; (*qlen)--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (netlink_rcv_skb(skb, cb)) {
			if (skb->len)
				skb_queue_head(&sk->sk_receive_queue, skb);
			else {
				kfree_skb(skb);
				(*qlen)--;
			}
			break;
		}
		kfree_skb(skb);
	}
}
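/*
 * Typical consumer of netlink_run_queue() (illustrative sketch; the
 * example_* names are hypothetical, with example_rcv_msg as sketched after
 * netlink_dump_start() above): a kernel socket's input callback drains the
 * queue until qlen reaches 0, as the comment above requires.
 */
#if 0
static void example_data_ready(struct sock *sk, int len)
{
	unsigned int qlen = 0;

	do {
		netlink_run_queue(sk, &qlen, &example_rcv_msg);
	} while (qlen);
}
#endif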
/**
 * netlink_queue_skip - Skip netlink message while processing queue.
 * @nlh: Netlink message to be skipped
 * @skb: Socket buffer containing the netlink messages.
 *
 * Pulls the given netlink message off the socket buffer so the next
 * call to netlink_run_queue() will not reconsider the message.
 */
void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
{
	int msglen = NLMSG_ALIGN(nlh->nlmsg_len);

	if (msglen > skb->len)
		msglen = skb->len;

	skb_pull(skb, msglen);
}

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
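/*
 * Sketch of a typical nlmsg_notify() call site (illustrative; the names
 * and the group number 1 are hypothetical): pid and report are taken from
 * the triggering request, so a requester that set NLM_F_ECHO gets a unicast
 * copy while the group listeners get the multicast.
 */
#if 0
static int example_send_event(struct sock *nl_sk, struct sk_buff *nskb,
			      struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
{
	u32 pid = NETLINK_CB(req_skb).pid;
	int report = nlmsg_report(req_nlh);	/* !!(nlmsg_flags & NLM_F_ECHO) */

	return nlmsg_notify(nl_sk, nskb, pid, 1, report, GFP_KERNEL);
}
#endif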
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next = netlink_seq_next,
	.stop = netlink_seq_stop,
	.show = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	seq = file->private_data;
	seq->private = iter;
	return 0;
}

static const struct file_operations netlink_seq_fops = {
	.owner = THIS_MODULE,
	.open = netlink_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
static const struct proto_ops netlink_ops = {
	.family = PF_NETLINK,
	.owner = THIS_MODULE,
	.release = netlink_release,
	.bind = netlink_bind,
	.connect = netlink_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = netlink_getname,
	.poll = datagram_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = netlink_setsockopt,
	.getsockopt = netlink_getsockopt,
	.sendmsg = netlink_sendmsg,
	.recvmsg = netlink_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner = THIS_MODULE,	/* for consistency 8) */
};
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}
core_initcall(netlink_proto_init);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_run_queue);
EXPORT_SYMBOL(netlink_queue_skip);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);
EXPORT_SYMBOL(nlmsg_notify);