[linux-2.6] net/core/link_watch.c
netdev: Move rest of qdisc state into struct netdev_queue
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>

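/*
 * LW_URGENT is set while an urgent (rate-limit bypassing) run of the
 * event list has been requested but not yet started.
 */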
enum lw_bits {
        LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static struct net_device *lweventlist;
static DEFINE_SPINLOCK(lweventlist_lock);

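/*
 * Derive the RFC 2863 operational state from the carrier and dormant
 * flags.  A device stacked on another one (ifindex != iflink) reports a
 * missing carrier as "lower layer down" rather than "down".
 */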
static unsigned char default_operstate(const struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                return (dev->ifindex != dev->iflink ?
                        IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

        if (netif_dormant(dev))
                return IF_OPER_DORMANT;

        return IF_OPER_UP;
}

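/*
 * Apply the device's configured link mode policy to the default
 * operstate and, if the result changed, publish the new value under
 * dev_base_lock.
 */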
static void rfc2863_policy(struct net_device *dev)
{
        unsigned char operstate = default_operstate(dev);

        if (operstate == dev->operstate)
                return;

        write_lock_bh(&dev_base_lock);

        switch(dev->link_mode) {
        case IF_LINK_MODE_DORMANT:
                if (operstate == IF_OPER_UP)
                        operstate = IF_OPER_DORMANT;
                break;

        case IF_LINK_MODE_DEFAULT:
        default:
                break;
        }

        dev->operstate = operstate;

        write_unlock_bh(&dev_base_lock);
}

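/*
 * "Urgent" events bypass the one-second rate limit: the device is
 * running and has carrier, but its qdisc has not been activated yet.
 */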
static int linkwatch_urgent_event(struct net_device *dev)
{
        struct netdev_queue *txq = &dev->tx_queue;

        return netif_running(dev) && netif_carrier_ok(dev) &&
               txq->qdisc != txq->qdisc_sleeping;
}

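/* Add the device to the head of the singly linked event list. */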
static void linkwatch_add_event(struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&lweventlist_lock, flags);
        dev->link_watch_next = lweventlist;
        lweventlist = dev;
        spin_unlock_irqrestore(&lweventlist_lock, flags);
}

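/*
 * Schedule the delayed work that drains the event list: immediately for
 * urgent events, otherwise no later than the next rate-limit boundary.
 */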
static void linkwatch_schedule_work(int urgent)
{
        unsigned long delay = linkwatch_nextevent - jiffies;

        if (test_bit(LW_URGENT, &linkwatch_flags))
                return;

        /* Minimise down-time: drop delay for up event. */
        if (urgent) {
                if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
                        return;
                delay = 0;
        }

        /* If we wrap around we'll delay it by at most HZ. */
        if (delay > HZ)
                delay = 0;

        /*
         * This is true if we've scheduled it immediately or if we don't
         * need an immediate execution and it's already pending.
         */
        if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
                return;

        /* Don't bother if there is nothing urgent. */
        if (!test_bit(LW_URGENT, &linkwatch_flags))
                return;

        /* It's already running which is good enough. */
        if (!cancel_delayed_work(&linkwatch_work))
                return;

        /* Otherwise we reschedule it again for immediate execution. */
        schedule_delayed_work(&linkwatch_work, 0);
}

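/*
 * Drain the event list and bring each queued device's operstate and
 * qdisc into line with its link state.  With urgent_only set, non-urgent
 * devices are put back on the list for the next regular run.
 */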
static void __linkwatch_run_queue(int urgent_only)
{
        struct net_device *next;

        /*
         * Limit the number of linkwatch events to one
         * per second so that a runaway driver does not
         * cause a storm of messages on the netlink
         * socket.  This limit does not apply to up events
         * while the device qdisc is down.
         */
        if (!urgent_only)
                linkwatch_nextevent = jiffies + HZ;
        /* Limit wrap-around effect on delay. */
        else if (time_after(linkwatch_nextevent, jiffies + HZ))
                linkwatch_nextevent = jiffies;

        clear_bit(LW_URGENT, &linkwatch_flags);

        spin_lock_irq(&lweventlist_lock);
        next = lweventlist;
        lweventlist = NULL;
        spin_unlock_irq(&lweventlist_lock);

        while (next) {
                struct net_device *dev = next;

                next = dev->link_watch_next;

                if (urgent_only && !linkwatch_urgent_event(dev)) {
                        linkwatch_add_event(dev);
                        continue;
                }

                /*
                 * Make sure the above read is complete since it can be
                 * rewritten as soon as we clear the bit below.
                 */
                smp_mb__before_clear_bit();

                /* We are about to handle this device,
                 * so new events can be accepted
                 */
                clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

                rfc2863_policy(dev);
                if (dev->flags & IFF_UP) {
                        if (netif_carrier_ok(dev)) {
                                struct netdev_queue *txq = &dev->tx_queue;

                                WARN_ON(txq->qdisc_sleeping == &noop_qdisc);
                                dev_activate(dev);
                        } else
                                dev_deactivate(dev);

                        netdev_state_change(dev);
                }

                dev_put(dev);
        }

        if (lweventlist)
                linkwatch_schedule_work(0);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
        __linkwatch_run_queue(0);
}

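/*
 * Delayed work handler.  If the one-second window has not expired yet,
 * only urgent events are processed; otherwise the whole list is drained.
 */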
static void linkwatch_event(struct work_struct *dummy)
{
        rtnl_lock();
        __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
        rtnl_unlock();
}

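/*
 * Report a link state change for a device, typically via
 * netif_carrier_on()/netif_carrier_off().  The device is queued at most
 * once; urgent events force the work to run without delay.
 */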
void linkwatch_fire_event(struct net_device *dev)
{
        int urgent = linkwatch_urgent_event(dev);

        if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
                dev_hold(dev);

                linkwatch_add_event(dev);
        } else if (!urgent)
                return;

        linkwatch_schedule_work(urgent);
}

EXPORT_SYMBOL(linkwatch_fire_event);
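
/*
 * Usage sketch (illustrative only, not part of link_watch.c): drivers do
 * not normally call linkwatch_fire_event() directly; they call
 * netif_carrier_on()/netif_carrier_off(), which invoke it when the
 * carrier state actually changes.  my_link_irq() and my_phy_link_is_up()
 * are hypothetical names, and <linux/interrupt.h> is assumed to be
 * included.
 */

/* Hypothetical PHY status query; a real driver reads its PHY/MAC state here. */
static bool my_phy_link_is_up(struct net_device *dev)
{
        return true;    /* placeholder */
}

/* Hypothetical link-change interrupt handler. */
static irqreturn_t my_link_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;

        if (my_phy_link_is_up(dev))
                netif_carrier_on(dev);  /* fires a linkwatch event if carrier was off */
        else
                netif_carrier_off(dev); /* fires a linkwatch event if carrier was on */

        return IRQ_HANDLED;
}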