/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
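
/*
 * All known nodes and neighbours live on the two global hlists above,
 * each protected by its own BH-safe spinlock.  Entries are reference
 * counted: nr_node_hold()/nr_node_put() and nr_neigh_hold()/nr_neigh_put()
 * take and drop references, and an entry is freed once its refcount
 * reaches zero.
 */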
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;
	struct hlist_node *node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}
static void nr_remove_neigh(struct nr_neigh *);

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
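/*
 * Each nr_node keeps up to three candidate routes in routes[0..2], sorted
 * by descending quality; "count" is the number of valid entries and
 * "which" indexes the route currently in use.  "obs_count" is the
 * obsolescence counter that nr_dec_obs() ages towards removal.
 */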
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	struct nr_route nr_route;
	int i, found;
	struct net_device *odev;

	if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;
		struct hlist_node *node;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, node, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}
	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = sysctl_netrom_default_path_quality;
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		atomic_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_ATOMIC);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;
	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		atomic_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}
	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1; break;
			case 1: nr_node->which = 0; break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
			switch (nr_node->which) {
			case 1: nr_node->which = 2; break;
			case 2: nr_node->which = 1; break;
			default: break;
			}
			nr_route           = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[2];
			nr_node->routes[2] = nr_route;
		}
		/* fall through and re-check the top pair */
	case 2:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1; break;
			case 1: nr_node->which = 0; break;
			default: break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return 0;
}
static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}
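
/*
 * The *_locked() wrappers above are for callers that already hold the
 * corresponding list lock (nr_dec_obs, nr_rt_device_down, nr_rt_free);
 * nr_remove_node() and nr_remove_neigh() take the lock themselves.
 */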
348 * "Delete" a node. Strictly speaking remove a route to a node. The node
349 * is only deleted if no routes are left to it.
351 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
353 struct nr_node *nr_node;
354 struct nr_neigh *nr_neigh;
357 nr_node = nr_node_get(callsign);
362 nr_neigh = nr_neigh_get_dev(neighbour, dev);
364 if (nr_neigh == NULL) {
365 nr_node_put(nr_node);
369 nr_node_lock(nr_node);
370 for (i = 0; i < nr_node->count; i++) {
371 if (nr_node->routes[i].neighbour == nr_neigh) {
373 nr_neigh_put(nr_neigh);
375 if (nr_neigh->count == 0 && !nr_neigh->locked)
376 nr_remove_neigh(nr_neigh);
377 nr_neigh_put(nr_neigh);
381 if (nr_node->count == 0) {
382 nr_remove_node(nr_node);
386 nr_node->routes[0] = nr_node->routes[1];
388 nr_node->routes[1] = nr_node->routes[2];
392 nr_node_put(nr_node);
394 nr_node_unlock(nr_node);
399 nr_neigh_put(nr_neigh);
400 nr_node_unlock(nr_node);
401 nr_node_put(nr_node);
/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	atomic_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}
454 * "Delete" a neighbour. The neighbour is only removed if the number
455 * of nodes that may use it is zero.
457 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
459 struct nr_neigh *nr_neigh;
461 nr_neigh = nr_neigh_get_dev(callsign, dev);
463 if (nr_neigh == NULL) return -EINVAL;
465 nr_neigh->quality = quality;
466 nr_neigh->locked = 0;
468 if (nr_neigh->count == 0)
469 nr_remove_neigh(nr_neigh);
470 nr_neigh_put(nr_neigh);
/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via it.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *node, *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;
			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;
				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					/* fall through */
				case 1:
					s->routes[1] = s->routes[2];
				case 2:
					break;
				}
				break;
			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}
/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *node, *nodet, *node2, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;
						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							/* fall through */
						case 1:
							t->routes[1] = t->routes[2];
						case 2:
							break;
						}
					}
				}
				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}
/*
 * Check that the device given is a valid AX.25 interface that is "up".
 * Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}
/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	read_unlock(&dev_base_lock);

	return first;
}
/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	read_unlock(&dev_base_lock);
	return dev;
}
static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
{
	static ax25_digi ax25_digi;
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		ax25_digi.calls[i]    = digipeaters[i];
		ax25_digi.repeated[i] = 0;
	}

	ax25_digi.ndigi      = ndigis;
	ax25_digi.lastrepeat = -1;

	return &ax25_digi;
}
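
/*
 * Note that nr_call_to_digi() fills in and returns a pointer to a static
 * buffer (or NULL when there are no digipeaters), so the result is only
 * valid until the next call; it is used transiently by nr_rt_ioctl() below.
 */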
/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
			dev_put(dev);
			return -EINVAL;
		}
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
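
/*
 * For illustration only (not part of this file): userspace adds a node
 * route roughly as follows, assuming <linux/netrom.h> and an already
 * opened NET/ROM socket descriptor "fd"; node_call and neigh_call are
 * placeholder ax25_address values.
 *
 *	struct nr_route_struct nr_route;
 *
 *	memset(&nr_route, 0, sizeof(nr_route));
 *	nr_route.type      = NETROM_NODE;
 *	nr_route.callsign  = node_call;
 *	strcpy(nr_route.mnemonic, "NODE");
 *	nr_route.neighbour = neigh_call;
 *	strcpy(nr_route.device, "ax0");
 *	nr_route.quality   = 192;
 *	nr_route.obs_count = 6;
 *	nr_route.ndigis    = 0;
 *	if (ioctl(fd, SIOCADDRT, &nr_route) < 0)
 *		perror("SIOCADDRT");
 *
 * The same structure with type NETROM_NEIGH locks a neighbour's quality,
 * and SIOCDELRT removes a route or neighbour.
 */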
/*
 * A level 2 link has timed out, so it appears to be a poor link;
 * don't use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct hlist_node *node;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, node, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}
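
/*
 * The NET/ROM network header carried in skb->data is laid out as:
 * origin callsign (7 bytes), destination callsign (7 bytes), TTL (1 byte),
 * which is why the source, destination and TTL are read at offsets 0, 7
 * and 14 in nr_route_frame() below.
 */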
/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
	if (nr_neigh->ax25 && ax25s) {
		/* We were already holding this ax25_cb */
		ax25_cb_put(ax25s);
	}
	nr_neigh->ax25 = ax25s;

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}
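
/*
 * /proc interface: the seq_file operations below walk the node and
 * neighbour lists under their respective locks.  The resulting
 * nr_nodes_fops and nr_neigh_fops are registered elsewhere (af_netrom.c)
 * as the /proc/net/nr_nodes and /proc/net/nr_neigh tables.
 */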
#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_node *nr_node;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_node_for_each(nr_node, node, &nr_node_list) {
		if (i == *pos)
			return nr_node;
		++i;
	}

	return NULL;
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_node_list.first
		: ((struct nr_node *)v)->node_node.next;

	return hlist_entry(node, struct nr_node, node_node);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = v;
		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

const struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;
	int i = 1;

	spin_lock_bh(&nr_neigh_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
		if (i == *pos)
			return nr_neigh;
		++i;
	}

	return NULL;
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct hlist_node *node;
	++*pos;

	node = (v == SEQ_START_TOKEN)
		? nr_neigh_list.first
		: ((struct nr_neigh *)v)->neigh_node.next;

	return hlist_entry(node, struct nr_neigh, neigh_node);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh = v;

		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

const struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif	/* CONFIG_PROC_FS */
/*
 * Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *node, *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		while (s->ax25 != NULL) {
			ax25_cb_put(s->ax25);
			s->ax25 = NULL;
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}