/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
14 #include <linux/cpu.h>
15 #include <linux/err.h>
16 #include <linux/hrtimer.h>
17 #include <linux/irq.h>
18 #include <linux/percpu.h>
19 #include <linux/profile.h>
20 #include <linux/sched.h>
21 #include <linux/tick.h>
23 #include "tick-internal.h"
26 * Broadcast support for broken x86 hardware, where the local apic
27 * timer stops in C3 state.
30 struct tick_device tick_broadcast_device;
31 static cpumask_t tick_broadcast_mask;
32 static DEFINE_SPINLOCK(tick_broadcast_lock);
35 * Start the device in periodic mode
37 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
39 if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
40 tick_setup_periodic(bc, 1);
44 * Check, if the device can be utilized as broadcast device:
46 int tick_check_broadcast_device(struct clock_event_device *dev)
48 if (tick_broadcast_device.evtdev ||
49 (dev->features & CLOCK_EVT_FEAT_C3STOP))
52 clockevents_exchange_device(NULL, dev);
53 tick_broadcast_device.evtdev = dev;
54 if (!cpus_empty(tick_broadcast_mask))
55 tick_broadcast_start_periodic(dev);
60 * Check, if the device is the broadcast device
62 int tick_is_broadcast_device(struct clock_event_device *dev)
64 return (dev && tick_broadcast_device.evtdev == dev);
68 * Check, if the device is disfunctional and a place holder, which
69 * needs to be handled by the broadcast device.
71 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76 spin_lock_irqsave(&tick_broadcast_lock, flags);
79 * Devices might be registered with both periodic and oneshot
80 * mode disabled. This signals, that the device needs to be
81 * operated from the broadcast device and is a placeholder for
82 * the cpu local device.
84 if (!tick_device_is_functional(dev)) {
85 dev->event_handler = tick_handle_periodic;
86 cpu_set(cpu, tick_broadcast_mask);
87 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
91 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
96 * Broadcast the event to the cpus, which are set in the mask
98 int tick_do_broadcast(cpumask_t mask)
100 int ret = 0, cpu = smp_processor_id();
101 struct tick_device *td;
104 * Check, if the current cpu is in the mask
106 if (cpu_isset(cpu, mask)) {
107 cpu_clear(cpu, mask);
108 td = &per_cpu(tick_cpu_device, cpu);
109 td->evtdev->event_handler(td->evtdev);
113 if (!cpus_empty(mask)) {
115 * It might be necessary to actually check whether the devices
116 * have different broadcast functions. For now, just use the
117 * one of the first device. This works as long as we have this
118 * misfeature only on x86 (lapic)
120 cpu = first_cpu(mask);
121 td = &per_cpu(tick_cpu_device, cpu);
122 td->evtdev->broadcast(mask);
129 * Periodic broadcast:
130 * - invoke the broadcast handlers
132 static void tick_do_periodic_broadcast(void)
136 spin_lock(&tick_broadcast_lock);
138 cpus_and(mask, cpu_online_map, tick_broadcast_mask);
139 tick_do_broadcast(mask);
141 spin_unlock(&tick_broadcast_lock);
145 * Event handler for periodic broadcast ticks
147 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
149 dev->next_event.tv64 = KTIME_MAX;
151 tick_do_periodic_broadcast();
154 * The device is in periodic mode. No reprogramming necessary:
156 if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
160 * Setup the next period for devices, which do not have
164 ktime_t next = ktime_add(dev->next_event, tick_period);
166 if (!clockevents_program_event(dev, next, ktime_get()))
168 tick_do_periodic_broadcast();
173 * Powerstate information: The system enters/leaves a state, where
174 * affected devices might stop
176 static void tick_do_broadcast_on_off(void *why)
178 struct clock_event_device *bc, *dev;
179 struct tick_device *td;
180 unsigned long flags, *reason = why;
183 spin_lock_irqsave(&tick_broadcast_lock, flags);
185 cpu = smp_processor_id();
186 td = &per_cpu(tick_cpu_device, cpu);
188 bc = tick_broadcast_device.evtdev;
191 * Is the device in broadcast mode forever or is it not
192 * affected by the powerstate ?
194 if (!dev || !tick_device_is_functional(dev) ||
195 !(dev->features & CLOCK_EVT_FEAT_C3STOP))
198 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
199 if (!cpu_isset(cpu, tick_broadcast_mask)) {
200 cpu_set(cpu, tick_broadcast_mask);
201 if (td->mode == TICKDEV_MODE_PERIODIC)
202 clockevents_set_mode(dev,
203 CLOCK_EVT_MODE_SHUTDOWN);
206 if (cpu_isset(cpu, tick_broadcast_mask)) {
207 cpu_clear(cpu, tick_broadcast_mask);
208 if (td->mode == TICKDEV_MODE_PERIODIC)
209 tick_setup_periodic(dev, 0);
213 if (cpus_empty(tick_broadcast_mask))
214 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
216 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
217 tick_broadcast_start_periodic(bc);
219 tick_broadcast_setup_oneshot(bc);
222 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
226 * Powerstate information: The system enters/leaves a state, where
227 * affected devices might stop.
229 void tick_broadcast_on_off(unsigned long reason, int *oncpu)
234 tick_do_broadcast_on_off(&reason);
236 smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
242 * Set the periodic handler depending on broadcast on/off
244 void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
247 dev->event_handler = tick_handle_periodic;
249 dev->event_handler = tick_handle_periodic_broadcast;
253 * Remove a CPU from broadcasting
255 void tick_shutdown_broadcast(unsigned int *cpup)
257 struct clock_event_device *bc;
259 unsigned int cpu = *cpup;
261 spin_lock_irqsave(&tick_broadcast_lock, flags);
263 bc = tick_broadcast_device.evtdev;
264 cpu_clear(cpu, tick_broadcast_mask);
266 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
267 if (bc && cpus_empty(tick_broadcast_mask))
268 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
271 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
274 #ifdef CONFIG_TICK_ONESHOT
276 static cpumask_t tick_broadcast_oneshot_mask;
278 static int tick_broadcast_set_event(ktime_t expires, int force)
280 struct clock_event_device *bc = tick_broadcast_device.evtdev;
281 ktime_t now = ktime_get();
285 res = clockevents_program_event(bc, expires, now);
289 expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
294 * Reprogram the broadcast device:
296 * Called with tick_broadcast_lock held and interrupts disabled.
298 static int tick_broadcast_reprogram(void)
300 ktime_t expires = { .tv64 = KTIME_MAX };
301 struct tick_device *td;
305 * Find the event which expires next:
307 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
308 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
309 td = &per_cpu(tick_cpu_device, cpu);
310 if (td->evtdev->next_event.tv64 < expires.tv64)
311 expires = td->evtdev->next_event;
314 if (expires.tv64 == KTIME_MAX)
317 return tick_broadcast_set_event(expires, 0);
321 * Handle oneshot mode broadcasting
323 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
325 struct tick_device *td;
330 spin_lock(&tick_broadcast_lock);
332 dev->next_event.tv64 = KTIME_MAX;
333 mask = CPU_MASK_NONE;
335 /* Find all expired events */
336 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
337 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
338 td = &per_cpu(tick_cpu_device, cpu);
339 if (td->evtdev->next_event.tv64 <= now.tv64)
344 * Wakeup the cpus which have an expired event. The broadcast
345 * device is reprogrammed in the return from idle code.
347 if (!tick_do_broadcast(mask)) {
349 * The global event did not expire any CPU local
350 * events. This happens in dyntick mode, as the
351 * maximum PIT delta is quite small.
353 if (tick_broadcast_reprogram())
356 spin_unlock(&tick_broadcast_lock);
360 * Powerstate information: The system enters/leaves a state, where
361 * affected devices might stop
363 void tick_broadcast_oneshot_control(unsigned long reason)
365 struct clock_event_device *bc, *dev;
366 struct tick_device *td;
370 spin_lock_irqsave(&tick_broadcast_lock, flags);
373 * Periodic mode does not care about the enter/exit of power
376 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
379 bc = tick_broadcast_device.evtdev;
380 cpu = smp_processor_id();
381 td = &per_cpu(tick_cpu_device, cpu);
384 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
387 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
388 if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
389 cpu_set(cpu, tick_broadcast_oneshot_mask);
390 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
391 if (dev->next_event.tv64 < bc->next_event.tv64)
392 tick_broadcast_set_event(dev->next_event, 1);
395 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
396 cpu_clear(cpu, tick_broadcast_oneshot_mask);
397 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
398 if (dev->next_event.tv64 != KTIME_MAX)
399 tick_program_event(dev->next_event, 1);
404 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
408 * tick_broadcast_setup_highres - setup the broadcast device for highres
410 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
412 if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
413 bc->event_handler = tick_handle_oneshot_broadcast;
414 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
415 bc->next_event.tv64 = KTIME_MAX;
420 * Select oneshot operating mode for the broadcast device
422 void tick_broadcast_switch_to_oneshot(void)
424 struct clock_event_device *bc;
427 spin_lock_irqsave(&tick_broadcast_lock, flags);
429 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
430 bc = tick_broadcast_device.evtdev;
432 tick_broadcast_setup_oneshot(bc);
433 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
438 * Remove a dead CPU from broadcasting
440 void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
442 struct clock_event_device *bc;
444 unsigned int cpu = *cpup;
446 spin_lock_irqsave(&tick_broadcast_lock, flags);
448 bc = tick_broadcast_device.evtdev;
449 cpu_clear(cpu, tick_broadcast_oneshot_mask);
451 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
452 if (bc && cpus_empty(tick_broadcast_oneshot_mask))
453 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
456 spin_unlock_irqrestore(&tick_broadcast_lock, flags);