#include <asm/setup.h>
#include <asm/io.h>
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
alpha_panic_event,
}
/* Register a call for panic conditions. */
- notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &alpha_panic_block);
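
For reference, a client of the converted panic chain keeps the same notifier_block shape; only the registration call changes. A minimal sketch, assuming a hypothetical example_panic_event handler (panic_notifier_list itself is declared in the <linux/kernel.h> hunk further down):

#include <linux/init.h>
#include <linux/kernel.h>	/* declares panic_notifier_list (see hunk below) */
#include <linux/module.h>
#include <linux/notifier.h>

/* hypothetical handler: runs in atomic context at panic time, must not block */
static int example_panic_event(struct notifier_block *nb,
			       unsigned long event, void *buf)
{
	/* buf points at the formatted panic message */
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_block = {
	.notifier_call = example_panic_event,
};

static int __init example_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &example_panic_block);
	return 0;
}
module_init(example_panic_init);
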
#ifdef CONFIG_ALPHA_GENERIC
/* Assume that we've booted from SRM if we haven't booted from MILO.
/* TODO: Setup front panel switch here */
/* Setup panic notifier */
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}
static int __init voiceblue_setup(void)
{
/* Setup panic notifier */
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
-struct notifier_block *i386die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
-
vmalloc_sync_all();
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&i386die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+ return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&i386die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
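
The new unregister_die_notifier() makes safe removal possible; both entry points are now thin wrappers around the atomic chain calls. A sketch of a consumer with hypothetical names (struct die_args and DIE_OOPS are taken from the kdebug.h hunks later in this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* hypothetical handler: invoked from notify_die() in atomic context */
static int example_die_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk(KERN_INFO "oops: %s, trap %d\n", args->str, args->trapnr);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_handler,
};

static int __init example_die_init(void)
{
	return register_die_notifier(&example_die_nb);
}

static void __exit example_die_exit(void)
{
	unregister_die_notifier(&example_die_nb);
}
module_init(example_die_init);
module_exit(example_die_exit);
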
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
return p > (void *)tinfo &&
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
-struct notifier_block *ia64die_chain;
+ATOMIC_NOTIFIER_HEAD(ia64die_chain);
int
register_die_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&ia64die_chain, nb);
+ return atomic_notifier_chain_register(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
int
unregister_die_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&ia64die_chain, nb);
+ return atomic_notifier_chain_unregister(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);
/* Set up panic notifier */
for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++)
- notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &lasat_panic_block[i]);
lasat_reboot_setup();
request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL);
init_timer(&blink_timer);
blink_timer.function = blink_timeout;
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}
init_timer(&blink_timer);
blink_timer.function = blink_timeout;
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL);
if (handle) {
/* initialize panic notifier chain */
- notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pdc_chassis_panic_block);
/* initialize reboot notifier chain */
register_reboot_notifier(&pdc_chassis_reboot_block);
panic_timeout = 180;
if (ppc_md.panic)
- notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc64_panic_block);
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
-struct notifier_block *powerpc_die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
int register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
+ return atomic_notifier_chain_register(&powerpc_die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&powerpc_die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
}
+EXPORT_SYMBOL(unregister_die_notifier);
/*
* Trap & Exception support
return parent;
}
-static struct notifier_block *pSeries_reconfig_chain;
+static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
- return notifier_chain_register(&pSeries_reconfig_chain, nb);
+ return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
}
void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
{
- notifier_chain_unregister(&pSeries_reconfig_chain, nb);
+ blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
}
static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
goto out_err;
}
- err = notifier_call_chain(&pSeries_reconfig_chain,
+ err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
PSERIES_RECONFIG_ADD, np);
if (err == NOTIFY_BAD) {
printk(KERN_ERR "Failed to add device node %s\n", path);
remove_node_proc_entries(np);
- notifier_call_chain(&pSeries_reconfig_chain,
+ blocking_notifier_call_chain(&pSeries_reconfig_chain,
PSERIES_RECONFIG_REMOVE, np);
of_detach_node(np);
hex = 0xfff;
if (!notifier_installed) {
++notifier_installed;
- notifier_chain_register(&panic_notifier_list,
+ atomic_notifier_chain_register(&panic_notifier_list,
&ibm_statusled_block);
}
}
/*
* Need to know about CPUs going idle?
*/
-static struct notifier_block *idle_chain;
+static ATOMIC_NOTIFIER_HEAD(idle_chain);
int register_idle_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&idle_chain, nb);
+ return atomic_notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
int unregister_idle_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&idle_chain, nb);
+ return atomic_notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
- notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
+ atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
}
return;
}
- rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+ rc = atomic_notifier_call_chain(&idle_chain,
+ CPU_IDLE, (void *)(long) cpu);
if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
BUG();
if (rc != NOTIFY_OK) {
#include <linux/kmod.h>
#endif
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
int register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&sparc64die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+ return atomic_notifier_chain_register(&sparc64die_chain, nb);
}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
/* When an irrecoverable trap occurs at tl > 0, the trap entry
* code logs the trap state registers at every level in the trap
static int add_notifier(void)
{
- notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &panic_exit_notifier);
return(0);
}
void __init setup_arch(char **cmdline_p)
{
- notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &panic_exit_notifier);
paging_init();
strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-static struct notifier_block *idle_notifier;
-static DEFINE_SPINLOCK(idle_notifier_lock);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
{
- unsigned long flags;
- spin_lock_irqsave(&idle_notifier_lock, flags);
- notifier_chain_register(&idle_notifier, n);
- spin_unlock_irqrestore(&idle_notifier_lock, flags);
+ atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
- unsigned long flags;
- spin_lock_irqsave(&idle_notifier_lock, flags);
- notifier_chain_unregister(&idle_notifier, n);
- spin_unlock_irqrestore(&idle_notifier_lock, flags);
+ atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL(idle_notifier_unregister);
void enter_idle(void)
{
__get_cpu_var(idle_state) = CPU_IDLE;
- notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
static void __exit_idle(void)
{
__get_cpu_var(idle_state) = CPU_NOT_IDLE;
- notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
-struct notifier_block *die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(die_chain);
int register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
-
vmalloc_sync_all();
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+ return atomic_notifier_chain_register(&die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&die_chain, nb);
}
+EXPORT_SYMBOL(unregister_die_notifier);
static inline void conditional_sti(struct pt_regs *regs)
{
void __init platform_setup(char **p_cmdline)
{
- notifier_chain_register(&panic_notifier_list, &iss_panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
}
.uevent = memory_uevent,
};
-static struct notifier_block *memory_chain;
+static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&memory_chain, nb);
+ return blocking_notifier_chain_register(&memory_chain, nb);
}
void unregister_memory_notifier(struct notifier_block *nb)
{
- notifier_chain_unregister(&memory_chain, nb);
+ blocking_notifier_chain_unregister(&memory_chain, nb);
}
/*
static inline int memory_notify(unsigned long val, void *v)
{
- return notifier_call_chain(&memory_chain, val, v);
+ return blocking_notifier_call_chain(&memory_chain, val, v);
}
/*
ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
add_timer(&ipmi_timer);
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
initialized = 1;
if (!initialized)
return;
- notifier_chain_unregister(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
/* This can't be called if any interfaces exist, so no worry about
shutting down the interfaces. */
static int try_smi_init(struct smi_info *smi);
-static struct notifier_block *xaction_notifier_list;
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block * nb)
{
- return notifier_chain_register(&xaction_notifier_list, nb);
+ return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static void si_restart_short_timer(struct smi_info *smi_info);
do_gettimeofday(&t);
printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
- err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
+ err = atomic_notifier_call_chain(&xaction_notifier_list,
+ 0, smi_info);
if (err & NOTIFY_STOP_MASK) {
rv = SI_SM_CALL_WITHOUT_DELAY;
goto out;
}
register_reboot_notifier(&wdog_reboot_notifier);
- notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &wdog_panic_notifier);
printk(KERN_INFO PFX "driver initialized\n");
release_nmi(&ipmi_nmi_handler);
#endif
- notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &wdog_panic_notifier);
unregister_reboot_notifier(&wdog_reboot_notifier);
if (! watchdog_user)
* changes to devices when the CPU clock speed changes.
* The mutex locks both lists.
*/
-static struct notifier_block *cpufreq_policy_notifier_list;
-static struct notifier_block *cpufreq_transition_notifier_list;
-static DECLARE_RWSEM (cpufreq_notifier_rwsem);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
static LIST_HEAD(cpufreq_governor_list);
dprintk("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
- down_read(&cpufreq_notifier_rwsem);
-
policy = cpufreq_cpu_data[freqs->cpu];
switch (state) {
freqs->old = policy->cur;
}
}
- notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_PRECHANGE, freqs);
+ blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
- notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_POSTCHANGE, freqs);
+ blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
policy->cur = freqs->new;
break;
}
- up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
freqs.old = cpu_policy->cur;
freqs.new = cur_freq;
- notifier_call_chain(&cpufreq_transition_notifier_list,
+ blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_SUSPENDCHANGE, &freqs);
adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
freqs.old = cpu_policy->cur;
freqs.new = cur_freq;
- notifier_call_chain(&cpufreq_transition_notifier_list,
+ blocking_notifier_call_chain(
+ &cpufreq_transition_notifier_list,
CPUFREQ_RESUMECHANGE, &freqs);
adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
* changes in cpufreq policy.
*
* This function may sleep, and has the same return conditions as
- * notifier_chain_register.
+ * blocking_notifier_chain_register.
*/
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
- down_write(&cpufreq_notifier_rwsem);
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
- ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
+ ret = blocking_notifier_chain_register(
+ &cpufreq_transition_notifier_list, nb);
break;
case CPUFREQ_POLICY_NOTIFIER:
- ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
+ ret = blocking_notifier_chain_register(
+ &cpufreq_policy_notifier_list, nb);
break;
default:
ret = -EINVAL;
}
- up_write(&cpufreq_notifier_rwsem);
return ret;
}
* Remove a driver from the CPU frequency notifier list.
*
* This function may sleep, and has the same return conditions as
- * notifier_chain_unregister.
+ * blocking_notifier_chain_unregister.
*/
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
- down_write(&cpufreq_notifier_rwsem);
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
- ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
+ ret = blocking_notifier_chain_unregister(
+ &cpufreq_transition_notifier_list, nb);
break;
case CPUFREQ_POLICY_NOTIFIER:
- ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
+ ret = blocking_notifier_chain_unregister(
+ &cpufreq_policy_notifier_list, nb);
break;
default:
ret = -EINVAL;
}
- up_write(&cpufreq_notifier_rwsem);
return ret;
}
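
The register/unregister paths above no longer take cpufreq_notifier_rwsem; each chain's own rwsem now provides that serialization, so clients are unaffected. As a rough illustration with hypothetical names, a transition notifier still looks like this:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* hypothetical callback: blocking chain, runs in process context, may sleep */
static int example_cpufreq_transition(struct notifier_block *nb,
				      unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu %u: %u -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_cpufreq_nb = {
	.notifier_call = example_cpufreq_transition,
};

/* registration goes through cpufreq_register_notifier(), which now calls
 * blocking_notifier_chain_register() internally */
static int __init example_cpufreq_init(void)
{
	return cpufreq_register_notifier(&example_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
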
if (ret)
goto error_out;
- down_read(&cpufreq_notifier_rwsem);
-
/* adjust if necessary - all reasons */
- notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
- policy);
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_ADJUST, policy);
/* adjust if necessary - hardware incompatibility*/
- notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
- policy);
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_INCOMPATIBLE, policy);
/* verify the cpu speed can be set within this limit,
which might be different to the first one */
ret = cpufreq_driver->verify(policy);
- if (ret) {
- up_read(&cpufreq_notifier_rwsem);
+ if (ret)
goto error_out;
- }
/* notification of the new policy */
- notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
- policy);
-
- up_read(&cpufreq_notifier_rwsem);
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_NOTIFY, policy);
data->min = policy->min;
data->max = policy->max;
static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
void *unused)
{
- static unsigned int notify_cnt = 0;
-
switch (code) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
if (host_control_on_shutdown) {
/* firmware is going to perform host control action */
- if (++notify_cnt == 2) {
- printk(KERN_WARNING
- "Please wait for shutdown "
- "action to complete...\n");
- dcdbas_host_control();
- }
- /*
- * register again and initiate the host control
- * action on the second notification to allow
- * everyone that registered to be notified
- */
- register_reboot_notifier(nb);
+ printk(KERN_WARNING "Please wait for shutdown "
+ "action to complete...\n");
+ dcdbas_host_control();
}
break;
}
static struct notifier_block dcdbas_reboot_nb = {
.notifier_call = dcdbas_reboot_notify,
.next = NULL,
- .priority = 0
+ .priority = INT_MIN
};
static DCDBAS_BIN_ATTR_RW(smi_data);
static struct class *adb_dev_class;
struct adb_driver *adb_controller;
-struct notifier_block *adb_client_list = NULL;
+BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
static pid_t adb_probe_task_pid;
/* Stop autopoll */
if (adb_controller->autopoll)
adb_controller->autopoll(0);
- ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);
+ ret = blocking_notifier_call_chain(&adb_client_list,
+ ADB_MSG_POWERDOWN, NULL);
if (ret & NOTIFY_STOP_MASK) {
up(&adb_probe_mutex);
return PBOOK_SLEEP_REFUSE;
if (adb_controller->autopoll)
adb_controller->autopoll(0);
- nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL);
+ nret = blocking_notifier_call_chain(&adb_client_list,
+ ADB_MSG_PRE_RESET, NULL);
if (nret & NOTIFY_STOP_MASK) {
if (adb_controller->autopoll)
adb_controller->autopoll(autopoll_devs);
}
up(&adb_handler_sem);
- nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL);
+ nret = blocking_notifier_call_chain(&adb_client_list,
+ ADB_MSG_POST_RESET, NULL);
if (nret & NOTIFY_STOP_MASK)
return -EBUSY;
adbhid_probe();
- notifier_chain_register(&adb_client_list, &adbhid_adb_notifier);
+ blocking_notifier_chain_register(&adb_client_list,
+ &adbhid_adb_notifier);
return 0;
}
int __fake_sleep;
int asleep;
-struct notifier_block *sleep_notifier_list;
+BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
#ifdef CONFIG_ADB
static int adb_dev_map = 0;
static int pmu_fully_inited = 0;
int asleep;
-struct notifier_block *sleep_notifier_list;
+BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
static int pmu_probe(void);
static int pmu_init(void);
struct adb_request sleep_req;
/* Notify device drivers */
- ret = notifier_call_chain(&sleep_notifier_list, PBOOK_SLEEP, NULL);
+ ret = blocking_notifier_call_chain(&sleep_notifier_list,
+ PBOOK_SLEEP, NULL);
if (ret & NOTIFY_STOP_MASK)
return -EBUSY;
enable_irq(i);
/* Notify drivers */
- notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL);
+ blocking_notifier_call_chain(&sleep_notifier_list, PBOOK_WAKE, NULL);
/* reenable ADB autopoll */
pmu_adb_autopoll(adb_dev_map);
static LIST_HEAD(wf_controls);
static LIST_HEAD(wf_sensors);
static DEFINE_MUTEX(wf_lock);
-static struct notifier_block *wf_client_list;
+static BLOCKING_NOTIFIER_HEAD(wf_client_list);
static int wf_client_count;
static unsigned int wf_overtemp;
static unsigned int wf_overtemp_counter;
static inline void wf_notify(int event, void *param)
{
- notifier_call_chain(&wf_client_list, event, param);
+ blocking_notifier_call_chain(&wf_client_list, event, param);
}
int wf_critical_overtemp(void)
struct wf_sensor *sr;
mutex_lock(&wf_lock);
- rc = notifier_chain_register(&wf_client_list, nb);
+ rc = blocking_notifier_chain_register(&wf_client_list, nb);
if (rc != 0)
goto bail;
wf_client_count++;
int wf_unregister_client(struct notifier_block *nb)
{
mutex_lock(&wf_lock);
- notifier_chain_unregister(&wf_client_list, nb);
+ blocking_notifier_chain_unregister(&wf_client_list, nb);
wf_client_count++;
if (wf_client_count == 0)
wf_stop_thread();
void ibmasm_register_panic_notifier(void)
{
- notifier_chain_register(&panic_notifier_list, &panic_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
}
void ibmasm_unregister_panic_notifier(void)
{
- notifier_chain_unregister(&panic_notifier_list, &panic_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &panic_notifier);
}
* bond_netdev_event: handle netdev notifier chain events.
*
* This function receives events for the netdev chain. The caller (an
- * ioctl handler calling notifier_call_chain) holds the necessary
+ * ioctl handler calling blocking_notifier_call_chain) holds the necessary
* locks for us to safely manipulate the slave devices (RTNL lock,
* dev_probe_lock).
*/
static struct notifier_block led_notifier = {
.notifier_call = led_halt,
};
+static int notifier_disabled = 0;
static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
char *txt;
-
+
+ if (notifier_disabled)
+ return NOTIFY_OK;
+
+ notifier_disabled = 1;
switch (event) {
case SYS_RESTART: txt = "SYSTEM RESTART";
break;
if (led_func_ptr)
led_func_ptr(0xff); /* turn all LEDs ON */
- unregister_reboot_notifier(&led_notifier);
return NOTIFY_OK;
}
return 1;
}
+static void __exit led_exit(void)
+{
+ unregister_reboot_notifier(&led_notifier);
+ return;
+}
+
#ifdef CONFIG_PROC_FS
module_init(led_create_procfs)
#endif
}
/* Register a call for panic conditions. */
- notifier_chain_register(&panic_notifier_list, &parisc_panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &parisc_panic_block);
tasklet_enable(&power_tasklet);
return;
tasklet_disable(&power_tasklet);
- notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &parisc_panic_block);
power_tasklet.func = NULL;
pdc_soft_power_button(0);
}
static struct notifier_block gdth_notifier = {
gdth_halt, NULL, 0
};
-
+static int notifier_disabled = 0;
static void gdth_delay(int milliseconds)
{
add_timer(&gdth_timer);
#endif
major = register_chrdev(0,"gdth",&gdth_fops);
+ notifier_disabled = 0;
register_reboot_notifier(&gdth_notifier);
}
gdth_polling = FALSE;
return gdth_ctr_vcount;
}
-
static int gdth_release(struct Scsi_Host *shp)
{
int hanum;
char cmnd[MAX_COMMAND_SIZE];
#endif
+ if (notifier_disabled)
+ return NOTIFY_OK;
+
TRACE2(("gdth_halt() event %d\n",(int)event));
if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
return NOTIFY_DONE;
+ notifier_disabled = 1;
printk("GDT-HA: Flushing all host drives .. ");
for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
gdth_flush(hanum);
#ifdef GDTH_STATISTICS
del_timer(&gdth_timer);
#endif
- unregister_reboot_notifier(&gdth_notifier);
return NOTIFY_OK;
}
#include <linux/mutex.h>
#include "usb.h"
-
-static struct notifier_block *usb_notifier_list;
-static DEFINE_MUTEX(usb_notifier_lock);
-
-static void usb_notifier_chain_register(struct notifier_block **list,
- struct notifier_block *n)
-{
- mutex_lock(&usb_notifier_lock);
- while (*list) {
- if (n->priority > (*list)->priority)
- break;
- list = &((*list)->next);
- }
- n->next = *list;
- *list = n;
- mutex_unlock(&usb_notifier_lock);
-}
-
-static void usb_notifier_chain_unregister(struct notifier_block **nl,
- struct notifier_block *n)
-{
- mutex_lock(&usb_notifier_lock);
- while ((*nl)!=NULL) {
- if ((*nl)==n) {
- *nl = n->next;
- goto exit;
- }
- nl=&((*nl)->next);
- }
-exit:
- mutex_unlock(&usb_notifier_lock);
-}
-
-static int usb_notifier_call_chain(struct notifier_block **n,
- unsigned long val, void *v)
-{
- int ret=NOTIFY_DONE;
- struct notifier_block *nb = *n;
-
- mutex_lock(&usb_notifier_lock);
- while (nb) {
- ret = nb->notifier_call(nb,val,v);
- if (ret&NOTIFY_STOP_MASK) {
- goto exit;
- }
- nb = nb->next;
- }
-exit:
- mutex_unlock(&usb_notifier_lock);
- return ret;
-}
+static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
/**
* usb_register_notify - register a notifier callback whenever a usb change happens
*/
void usb_register_notify(struct notifier_block *nb)
{
- usb_notifier_chain_register(&usb_notifier_list, nb);
+ blocking_notifier_chain_register(&usb_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(usb_register_notify);
*/
void usb_unregister_notify(struct notifier_block *nb)
{
- usb_notifier_chain_unregister(&usb_notifier_list, nb);
+ blocking_notifier_chain_unregister(&usb_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(usb_unregister_notify);
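
The exported usb_register_notify()/usb_unregister_notify() keep their prototypes, so callers need no changes. A sketch of a consumer, with hypothetical names and assuming the USB_DEVICE_*/USB_BUS_* event codes declared in <linux/usb.h>:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/usb.h>

/* hypothetical callback: blocking chain, so sleeping is allowed here */
static int example_usb_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case USB_DEVICE_ADD:
	case USB_DEVICE_REMOVE:
		/* data is a struct usb_device * for these events */
		break;
	case USB_BUS_ADD:
	case USB_BUS_REMOVE:
		/* data is a struct usb_bus * for these events */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_usb_nb = {
	.notifier_call = example_usb_notify,
};

static int __init example_usb_init(void)
{
	usb_register_notify(&example_usb_nb);
	return 0;
}
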
void usb_notify_add_device(struct usb_device *udev)
{
- usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
+ blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
}
void usb_notify_remove_device(struct usb_device *udev)
{
- usb_notifier_call_chain(&usb_notifier_list, USB_DEVICE_REMOVE, udev);
+ blocking_notifier_call_chain(&usb_notifier_list,
+ USB_DEVICE_REMOVE, udev);
}
void usb_notify_add_bus(struct usb_bus *ubus)
{
- usb_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
+ blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
}
void usb_notify_remove_bus(struct usb_bus *ubus)
{
- usb_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
+ blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
}
#define FBPIXMAPSIZE (1024 * 8)
-static struct notifier_block *fb_notifier_list;
+static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
struct fb_info *registered_fb[FB_MAX];
int num_registered_fb;
event.info = info;
event.data = &mode1;
- ret = notifier_call_chain(&fb_notifier_list,
+ ret = blocking_notifier_call_chain(&fb_notifier_list,
FB_EVENT_MODE_DELETE, &event);
}
info->flags &= ~FBINFO_MISC_USEREVENT;
event.info = info;
- notifier_call_chain(&fb_notifier_list, evnt,
- &event);
+ blocking_notifier_call_chain(&fb_notifier_list,
+ evnt, &event);
}
}
}
event.info = info;
event.data = &blank;
event.data = &blank;
- notifier_call_chain(&fb_notifier_list, FB_EVENT_BLANK, &event);
+ blocking_notifier_call_chain(&fb_notifier_list,
+ FB_EVENT_BLANK, &event);
}
return ret;
con2fb.framebuffer = -1;
event.info = info;
event.data = &con2fb;
- notifier_call_chain(&fb_notifier_list,
+ blocking_notifier_call_chain(&fb_notifier_list,
FB_EVENT_GET_CONSOLE_MAP, &event);
return copy_to_user(argp, &con2fb,
sizeof(con2fb)) ? -EFAULT : 0;
return -EINVAL;
event.info = info;
event.data = &con2fb;
- return notifier_call_chain(&fb_notifier_list,
+ return blocking_notifier_call_chain(&fb_notifier_list,
FB_EVENT_SET_CONSOLE_MAP,
&event);
case FBIOBLANK:
devfs_mk_cdev(MKDEV(FB_MAJOR, i),
S_IFCHR | S_IRUGO | S_IWUGO, "fb/%d", i);
event.info = fb_info;
- notifier_call_chain(&fb_notifier_list,
+ blocking_notifier_call_chain(&fb_notifier_list,
FB_EVENT_FB_REGISTERED, &event);
return 0;
}
*/
int fb_register_client(struct notifier_block *nb)
{
- return notifier_chain_register(&fb_notifier_list, nb);
+ return blocking_notifier_chain_register(&fb_notifier_list, nb);
}
/**
*/
int fb_unregister_client(struct notifier_block *nb)
{
- return notifier_chain_unregister(&fb_notifier_list, nb);
+ return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
}
/**
event.info = info;
if (state) {
- notifier_call_chain(&fb_notifier_list, FB_EVENT_SUSPEND, &event);
+ blocking_notifier_call_chain(&fb_notifier_list,
+ FB_EVENT_SUSPEND, &event);
info->state = FBINFO_STATE_SUSPENDED;
} else {
info->state = FBINFO_STATE_RUNNING;
- notifier_call_chain(&fb_notifier_list, FB_EVENT_RESUME, &event);
+ blocking_notifier_call_chain(&fb_notifier_list,
+ FB_EVENT_RESUME, &event);
}
}
if (!list_empty(&info->modelist)) {
event.info = info;
- err = notifier_call_chain(&fb_notifier_list,
+ err = blocking_notifier_call_chain(&fb_notifier_list,
FB_EVENT_NEW_MODELIST,
&event);
}
evnt.info = info;
evnt.data = data;
- return notifier_call_chain(&fb_notifier_list, event, &evnt);
+ return blocking_notifier_call_chain(&fb_notifier_list, event, &evnt);
}
EXPORT_SYMBOL(fb_con_duit);
int signr;
};
-/* Note - you should never unregister because that can race with NMIs.
- If you really want to do it first unregister - then synchronize_sched - then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *i386die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head i386die_chain;
/* Grossly misnamed. */
.trapnr = trap,
.signr = sig
};
- return notifier_call_chain(&i386die_chain, val, &args);
+ return atomic_notifier_call_chain(&i386die_chain, val, &args);
}
#endif
extern int register_die_notifier(struct notifier_block *);
extern int unregister_die_notifier(struct notifier_block *);
-extern struct notifier_block *ia64die_chain;
+extern struct atomic_notifier_head ia64die_chain;
enum die_val {
DIE_BREAK = 1,
.signr = sig
};
- return notifier_call_chain(&ia64die_chain, val, &args);
+ return atomic_notifier_call_chain(&ia64die_chain, val, &args);
}
#endif
int signr;
};
-/*
- Note - you should never unregister because that can race with NMIs.
- If you really want to do it first unregister - then synchronize_sched -
- then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *powerpc_die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head powerpc_die_chain;
/* Grossly misnamed. */
enum die_val {
static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
{
struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
- return notifier_call_chain(&powerpc_die_chain, val, &args);
+ return atomic_notifier_call_chain(&powerpc_die_chain, val, &args);
}
#endif /* __KERNEL__ */
int signr;
};
-/* Note - you should never unregister because that can race with NMIs.
- * If you really want to do it first unregister - then synchronize_sched
- * - then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *sparc64die_chain;
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head sparc64die_chain;
extern void bad_trap(struct pt_regs *, long);
.trapnr = trap,
.signr = sig };
- return notifier_call_chain(&sparc64die_chain, val, &args);
+ return atomic_notifier_call_chain(&sparc64die_chain, val, &args);
}
#endif
struct pt_regs;
-struct die_args {
+struct die_args {
struct pt_regs *regs;
const char *str;
- long err;
+ long err;
int trapnr;
int signr;
-};
+};
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;
-/* Note - you should never unregister because that can race with NMIs.
- If you really want to do it first unregister - then synchronize_sched - then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
/* Grossly misnamed. */
-enum die_val {
+enum die_val {
DIE_OOPS = 1,
DIE_INT3,
DIE_DEBUG,
DIE_CALL,
DIE_NMI_IPI,
DIE_PAGE_FAULT,
-};
-
+};
+
static inline int notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
.trapnr = trap,
.signr = sig
};
- return notifier_call_chain(&die_chain, val, &args);
+ return atomic_notifier_call_chain(&die_chain, val, &args);
}
extern int printk_address(unsigned long address);
ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
};
extern struct adb_driver *adb_controller;
-extern struct notifier_block *adb_client_list;
+extern struct blocking_notifier_head adb_client_list;
int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
int flags, int nbytes, ...);
(__x < 0) ? -__x : __x; \
})
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
__attribute__ ((NORET_AND format (printf, 1, 2)));
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-struct notifier_block;
#endif /* CONFIG_MEMORY_HOTPLUG */
#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x)
-extern struct notifier_block *ip_conntrack_chain;
-extern struct notifier_block *ip_conntrack_expect_chain;
+extern struct atomic_notifier_head ip_conntrack_chain;
+extern struct atomic_notifier_head ip_conntrack_expect_chain;
static inline int ip_conntrack_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&ip_conntrack_chain, nb);
+ return atomic_notifier_chain_register(&ip_conntrack_chain, nb);
}
static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&ip_conntrack_chain, nb);
+ return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb);
}
static inline int
ip_conntrack_expect_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&ip_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb);
}
static inline int
ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain,
+ nb);
}
extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
struct ip_conntrack *ct)
{
if (is_confirmed(ct) && !is_dying(ct))
- notifier_call_chain(&ip_conntrack_chain, event, ct);
+ atomic_notifier_call_chain(&ip_conntrack_chain, event, ct);
}
static inline void
ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
struct ip_conntrack_expect *exp)
{
- notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
+ atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp);
}
#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */
static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
-struct notifier_block
-{
- int (*notifier_call)(struct notifier_block *self, unsigned long, void *);
+/*
+ * Notifier chains are of three types:
+ *
+ * Atomic notifier chains: Chain callbacks run in interrupt/atomic
+ * context. Callouts are not allowed to block.
+ * Blocking notifier chains: Chain callbacks run in process context.
+ * Callouts are allowed to block.
+ * Raw notifier chains: There are no restrictions on callbacks,
+ * registration, or unregistration. All locking and protection
+ * must be provided by the caller.
+ *
+ * atomic_notifier_chain_register() may be called from an atomic context,
+ * but blocking_notifier_chain_register() must be called from a process
+ * context. Ditto for the corresponding _unregister() routines.
+ *
+ * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister()
+ * _must not_ be called from within the call chain.
+ */
+
+struct notifier_block {
+ int (*notifier_call)(struct notifier_block *, unsigned long, void *);
struct notifier_block *next;
int priority;
};
+struct atomic_notifier_head {
+ spinlock_t lock;
+ struct notifier_block *head;
+};
+
+struct blocking_notifier_head {
+ struct rw_semaphore rwsem;
+ struct notifier_block *head;
+};
+
+struct raw_notifier_head {
+ struct notifier_block *head;
+};
+
+#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
+ spin_lock_init(&(name)->lock); \
+ (name)->head = NULL; \
+ } while (0)
+#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
+ init_rwsem(&(name)->rwsem); \
+ (name)->head = NULL; \
+ } while (0)
+#define RAW_INIT_NOTIFIER_HEAD(name) do { \
+ (name)->head = NULL; \
+ } while (0)
+
+#define ATOMIC_NOTIFIER_INIT(name) { \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .head = NULL }
+#define BLOCKING_NOTIFIER_INIT(name) { \
+ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
+ .head = NULL }
+#define RAW_NOTIFIER_INIT(name) { \
+ .head = NULL }
+
+#define ATOMIC_NOTIFIER_HEAD(name) \
+ struct atomic_notifier_head name = \
+ ATOMIC_NOTIFIER_INIT(name)
+#define BLOCKING_NOTIFIER_HEAD(name) \
+ struct blocking_notifier_head name = \
+ BLOCKING_NOTIFIER_INIT(name)
+#define RAW_NOTIFIER_HEAD(name) \
+ struct raw_notifier_head name = \
+ RAW_NOTIFIER_INIT(name)
#ifdef __KERNEL__
-extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n);
-extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n);
-extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v);
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
+ struct notifier_block *);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
+ struct notifier_block *);
+extern int raw_notifier_chain_register(struct raw_notifier_head *,
+ struct notifier_block *);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
+ struct notifier_block *);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
+ struct notifier_block *);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
+ struct notifier_block *);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
+ unsigned long val, void *v);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
+ unsigned long val, void *v);
+extern int raw_notifier_call_chain(struct raw_notifier_head *,
+ unsigned long val, void *v);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
+ /* Bad/Veto action */
/*
* Clean way to return from the notifier and stop further calls.
*/
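
Putting the header together: each chain type gets a declaration macro plus a matching register/unregister/call triple. A minimal sketch of the intended usage, with hypothetical chain and callback names:

#include <linux/notifier.h>

/* static heads: initialized at compile time by the new macros */
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);
static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);

static int example_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	/* returning NOTIFY_STOP (NOTIFY_OK | NOTIFY_STOP_MASK) ends the walk early */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_cb,
	.priority = 0,		/* higher priority is called earlier */
};

static void example_usage(void)
{
	/* registration and the call itself may happen in atomic context */
	atomic_notifier_chain_register(&example_atomic_chain, &example_nb);
	atomic_notifier_call_chain(&example_atomic_chain, 1, NULL);
	/* unregistration sleeps (synchronize_rcu), so do it in process context */
	atomic_notifier_chain_unregister(&example_atomic_chain, &example_nb);

	/* blocking chains: process context only, callouts may sleep */
	blocking_notifier_chain_register(&example_blocking_chain, &example_nb);
	blocking_notifier_call_chain(&example_blocking_chain, 1, NULL);
	blocking_notifier_chain_unregister(&example_blocking_chain, &example_nb);
}
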
#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x)
-extern struct notifier_block *nf_conntrack_chain;
-extern struct notifier_block *nf_conntrack_expect_chain;
+extern struct atomic_notifier_head nf_conntrack_chain;
+extern struct atomic_notifier_head nf_conntrack_expect_chain;
static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&nf_conntrack_chain, nb);
+ return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
}
static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&nf_conntrack_chain, nb);
+ return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
}
static inline int
nf_conntrack_expect_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&nf_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
}
static inline int
nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+ return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
+ nb);
}
extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
struct nf_conn *ct)
{
if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
- notifier_call_chain(&nf_conntrack_chain, event, ct);
+ atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
}
static inline void
nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp)
{
- notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
+ atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
}
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
/* This protects CPUs going up and down... */
static DECLARE_MUTEX(cpucontrol);
-static struct notifier_block *cpu_chain;
+static BLOCKING_NOTIFIER_HEAD(cpu_chain);
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *lock_cpu_hotplug_owner;
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
- int ret;
-
- if ((ret = lock_cpu_hotplug_interruptible()) != 0)
- return ret;
- ret = notifier_chain_register(&cpu_chain, nb);
- unlock_cpu_hotplug();
- return ret;
+ return blocking_notifier_chain_register(&cpu_chain, nb);
}
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
- lock_cpu_hotplug();
- notifier_chain_unregister(&cpu_chain, nb);
- unlock_cpu_hotplug();
+ blocking_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
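
register_cpu_notifier() no longer has to take the hotplug lock itself; the blocking chain's rwsem covers registration. A sketch of a CPU hotplug client using the event codes visible in these hunks (hypothetical names; register_cpu_notifier() is assumed to be declared in <linux/cpu.h>):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* hypothetical callback: hcpu carries the CPU number cast to a pointer */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "cpu %u is up\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "cpu %u is gone\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static int __init example_cpu_init(void)
{
	return register_cpu_notifier(&example_cpu_nb);
}
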
goto out;
}
- err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+ err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu);
if (err == NOTIFY_BAD) {
printk("%s: attempt to take down CPU %u failed\n",
p = __stop_machine_run(take_cpu_down, NULL, cpu);
if (IS_ERR(p)) {
/* CPU didn't die: tell everyone. Can't complain. */
- if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+ if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
(void *)(long)cpu) == NOTIFY_BAD)
BUG();
put_cpu();
/* CPU is completely dead: tell everyone. Too late to complain. */
- if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
- == NOTIFY_BAD)
+ if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+ (void *)(long)cpu) == NOTIFY_BAD)
BUG();
check_for_tasks(cpu);
goto out;
}
- ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+ ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n",
__FUNCTION__, cpu);
BUG_ON(!cpu_online(cpu));
/* Now call notifier in preparation. */
- notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+ blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
out_notify:
if (ret != 0)
- notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+ blocking_notifier_call_chain(&cpu_chain,
+ CPU_UP_CANCELED, hcpu);
out:
unlock_cpu_hotplug();
return ret;
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);
-static DEFINE_MUTEX(notify_mutex);
-static struct notifier_block * module_notify_list;
+static BLOCKING_NOTIFIER_HEAD(module_notify_list);
int register_module_notifier(struct notifier_block * nb)
{
- int err;
-	mutex_lock(&notify_mutex);
-	err = notifier_chain_register(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
- return err;
+ return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);
int unregister_module_notifier(struct notifier_block * nb)
{
- int err;
-	mutex_lock(&notify_mutex);
-	err = notifier_chain_unregister(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
- return err;
+ return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
/* Drop lock so they can recurse */
mutex_unlock(&module_mutex);
-	mutex_lock(&notify_mutex);
-	notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
-	mutex_unlock(&notify_mutex);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_COMING, mod);
/* Start the module */
if (mod->init != NULL)
int panic_timeout;
EXPORT_SYMBOL(panic_timeout);
-struct notifier_block *panic_notifier_list;
+ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
smp_send_stop();
#endif
- notifier_call_chain(&panic_notifier_list, 0, buf);
+ atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
if (!panic_blink)
panic_blink = no_blink;
#ifdef CONFIG_PROFILING
-static DECLARE_RWSEM(profile_rwsem);
-static DEFINE_RWLOCK(handoff_lock);
-static struct notifier_block * task_exit_notifier;
-static struct notifier_block * task_free_notifier;
-static struct notifier_block * munmap_notifier;
+static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
void profile_task_exit(struct task_struct * task)
{
- down_read(&profile_rwsem);
- notifier_call_chain(&task_exit_notifier, 0, task);
- up_read(&profile_rwsem);
+ blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}
int profile_handoff_task(struct task_struct * task)
{
int ret;
- read_lock(&handoff_lock);
- ret = notifier_call_chain(&task_free_notifier, 0, task);
- read_unlock(&handoff_lock);
+ ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
return (ret == NOTIFY_OK) ? 1 : 0;
}
void profile_munmap(unsigned long addr)
{
- down_read(&profile_rwsem);
- notifier_call_chain(&munmap_notifier, 0, (void *)addr);
- up_read(&profile_rwsem);
+ blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}
int task_handoff_register(struct notifier_block * n)
{
- int err = -EINVAL;
-
- write_lock(&handoff_lock);
- err = notifier_chain_register(&task_free_notifier, n);
- write_unlock(&handoff_lock);
- return err;
+ return atomic_notifier_chain_register(&task_free_notifier, n);
}
int task_handoff_unregister(struct notifier_block * n)
{
- int err = -EINVAL;
-
- write_lock(&handoff_lock);
- err = notifier_chain_unregister(&task_free_notifier, n);
- write_unlock(&handoff_lock);
- return err;
+ return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
int profile_event_register(enum profile_type type, struct notifier_block * n)
{
int err = -EINVAL;
- down_write(&profile_rwsem);
-
switch (type) {
case PROFILE_TASK_EXIT:
- err = notifier_chain_register(&task_exit_notifier, n);
+ err = blocking_notifier_chain_register(
+ &task_exit_notifier, n);
break;
case PROFILE_MUNMAP:
- err = notifier_chain_register(&munmap_notifier, n);
+ err = blocking_notifier_chain_register(
+ &munmap_notifier, n);
break;
}
- up_write(&profile_rwsem);
-
return err;
}
{
int err = -EINVAL;
- down_write(&profile_rwsem);
-
switch (type) {
case PROFILE_TASK_EXIT:
- err = notifier_chain_unregister(&task_exit_notifier, n);
+ err = blocking_notifier_chain_unregister(
+ &task_exit_notifier, n);
break;
case PROFILE_MUNMAP:
- err = notifier_chain_unregister(&munmap_notifier, n);
+ err = blocking_notifier_chain_unregister(
+ &munmap_notifier, n);
break;
}
- up_write(&profile_rwsem);
return err;
}
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
- notifier_chain_register(&panic_notifier_list, &panic_block);
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}
* and the like.
*/
-static struct notifier_block *reboot_notifier_list;
-static DEFINE_RWLOCK(notifier_lock);
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines. The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if (n->priority > (*nl)->priority)
+ break;
+ nl = &((*nl)->next);
+ }
+ n->next = *nl;
+ rcu_assign_pointer(*nl, n);
+ return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if ((*nl) == n) {
+ rcu_assign_pointer(*nl, n->next);
+ return 0;
+ }
+ nl = &((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+ unsigned long val, void *v)
+{
+ int ret = NOTIFY_DONE;
+ struct notifier_block *nb;
+
+ nb = rcu_dereference(*nl);
+ while (nb) {
+ ret = nb->notifier_call(nb, val, v);
+ if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ break;
+ nb = rcu_dereference(nb->next);
+ }
+ return ret;
+}
+
+/*
+ * Atomic notifier chain routines. Registration and unregistration
+ * use a mutex, and call_chain is synchronized by RCU (no locks).
+ */
/**
- * notifier_chain_register - Add notifier to a notifier chain
- * @list: Pointer to root list pointer
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
* @n: New entry in notifier chain
*
- * Adds a notifier to a notifier chain.
+ * Adds a notifier to an atomic notifier chain.
*
* Currently always returns zero.
*/
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_unregister(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ synchronize_rcu();
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ * atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an atomic context, so they must not block.
+ * This routine uses RCU to synchronize with changes to the chain.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
-int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v)
{
-	write_lock(&notifier_lock);
- while(*list)
- {
- if(n->priority > (*list)->priority)
- break;
- list= &((*list)->next);
- }
- n->next = *list;
- *list=n;
-	write_unlock(&notifier_lock);
- return 0;
+ int ret;
+
+ rcu_read_lock();
+ ret = notifier_call_chain(&nh->head, val, v);
+ rcu_read_unlock();
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_register);
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
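
Since atomic_notifier_chain_unregister() ends with synchronize_rcu(), a dynamically allocated notifier_block may be freed as soon as it returns, as long as the unregister is not issued from within the chain itself (per the new header comment). A sketch of that teardown pattern, with a hypothetical chain:

#include <linux/notifier.h>
#include <linux/slab.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

/* hypothetical teardown: safe because unregister waits for RCU readers */
static void example_teardown(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&example_chain, nb);
	/* no atomic_notifier_call_chain() walker can still see nb here */
	kfree(nb);
}
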
+
+/*
+ * Blocking notifier chain routines. All access to the chain is
+ * synchronized by an rwsem.
+ */
/**
- * notifier_chain_unregister - Remove notifier from a notifier chain
- * @nl: Pointer to root list pointer
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @n: New entry in notifier chain
*
- * Removes a notifier from a notifier chain.
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
*
- * Returns zero on success, or %-ENOENT on failure.
+ * Currently always returns zero.
*/
-int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
{
-	write_lock(&notifier_lock);
- while((*nl)!=NULL)
- {
- if((*nl)==n)
- {
- *nl=n->next;
-	write_unlock(&notifier_lock);
- return 0;
- }
- nl=&((*nl)->next);
- }
-	write_unlock(&notifier_lock);
- return -ENOENT;
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_register(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_unregister);
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
- * notifier_call_chain - Call functions in a notifier chain
- * @n: Pointer to root pointer of notifier chain
+ * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a blocking notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_unregister(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+
+/**
+ * blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
- * Calls each function in a notifier chain in turn.
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
*
- * If the return value of the notifier can be and'd
- * with %NOTIFY_STOP_MASK, then notifier_call_chain
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
* will return immediately, with the return value of
* the notifier function which halted execution.
- * Otherwise, the return value is the return value
+ * Otherwise the return value is the return value
* of the last notifier function called.
*/
-int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v)
{
- int ret=NOTIFY_DONE;
- struct notifier_block *nb = *n;
+ int ret;
- while(nb)
- {
- ret=nb->notifier_call(nb,val,v);
- if(ret&NOTIFY_STOP_MASK)
- {
- return ret;
- }
- nb=nb->next;
- }
+ down_read(&nh->rwsem);
+ ret = notifier_call_chain(&nh->head, val, v);
+ up_read(&nh->rwsem);
return ret;
}
-EXPORT_SYMBOL(notifier_call_chain);
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+
+/*
+ * Raw notifier chain routines. There is no protection;
+ * the caller must provide it. Use at your own risk!
+ */
+
+/**
+ * raw_notifier_chain_register - Add notifier to a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Currently always returns zero.
+ */
+
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_register(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_unregister(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ * raw_notifier_call_chain - Call functions in a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an undefined context.
+ * All locking must be provided by the caller.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return notifier_call_chain(&nh->head, val, v);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
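As a rough sketch (again with hypothetical names), a user of the raw variant supplies its own serialization, typically an existing subsystem lock, because raw_notifier_* performs none:

#include <linux/mutex.h>
#include <linux/notifier.h>

static RAW_NOTIFIER_HEAD(example_raw_chain);
static DEFINE_MUTEX(example_lock);	/* assumed pre-existing subsystem lock */

static int example_add_listener(struct notifier_block *nb)
{
	int err;

	/* raw_* does no locking of its own; serialize it ourselves. */
	mutex_lock(&example_lock);
	err = raw_notifier_chain_register(&example_raw_chain, nb);
	mutex_unlock(&example_lock);
	return err;
}

static int example_fire(unsigned long event, void *data)
{
	int ret;

	mutex_lock(&example_lock);
	ret = raw_notifier_call_chain(&example_raw_chain, event, data);
	mutex_unlock(&example_lock);
	return ret;
}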
/**
* register_reboot_notifier - Register function to be called at reboot time
* Registers a function with the list of functions
* to be called at reboot time.
*
- * Currently always returns zero, as notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register
* always returns zero.
*/
int register_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_register(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
int unregister_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_unregister(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
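A driver hooking into the reboot chain would look roughly like the sketch below; the mydrv_* names are hypothetical. Since reboot_notifier_list is now a blocking chain, the callback runs in process context and may sleep:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int mydrv_reboot_event(struct notifier_block *nb, unsigned long event,
			      void *cmd)
{
	switch (event) {
	case SYS_RESTART:	/* cmd may point to the restart command */
	case SYS_HALT:
	case SYS_POWER_OFF:
		/* quiesce the (hypothetical) hardware here */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block mydrv_reboot_nb = {
	.notifier_call = mydrv_reboot_event,
};

static int __init mydrv_init(void)
{
	return register_reboot_notifier(&mydrv_reboot_nb);
}

static void __exit mydrv_exit(void)
{
	unregister_reboot_notifier(&mydrv_reboot_nb);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");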
void kernel_restart_prepare(char *cmd)
{
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+ blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
}
void kernel_shutdown_prepare(enum system_states state)
{
- notifier_call_chain(&reboot_notifier_list,
+ blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
system_state = state;
device_shutdown();
struct hci_proto *hci_proto[HCI_MAX_PROTO];
/* HCI notifiers list */
-static struct notifier_block *hci_notifier;
+static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */
int hci_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&hci_notifier, nb);
+ return atomic_notifier_chain_register(&hci_notifier, nb);
}
int hci_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&hci_notifier, nb);
+ return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
static void hci_notify(struct hci_dev *hdev, int event)
{
- notifier_call_chain(&hci_notifier, event, hdev);
+ atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
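Since hci_notifier is now an atomic chain, registered callbacks may be invoked from atomic context and must not sleep. A hypothetical consumer (names invented for illustration) might look like:

#include <linux/notifier.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Runs via atomic_notifier_call_chain(): no sleeping allowed. */
static int example_hci_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_UP)
		BT_DBG("%s is up", hdev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
	.notifier_call = example_hci_event,
};

/* Registration: hci_register_notifier(&example_hci_nb); */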
/* ---- HCI requests ---- */
* Our notifier list
*/
-static struct notifier_block *netdev_chain;
+static BLOCKING_NOTIFIER_HEAD(netdev_chain);
/*
* Device drivers call our routines to queue packets here. We empty the
if (!err) {
hlist_del(&dev->name_hlist);
hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
- notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+ blocking_notifier_call_chain(&netdev_chain,
+ NETDEV_CHANGENAME, dev);
}
return err;
*/
void netdev_features_change(struct net_device *dev)
{
- notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
- notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+ blocking_notifier_call_chain(&netdev_chain,
+ NETDEV_CHANGE, dev);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
}
}
/*
* ... and announce new interface.
*/
- notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
}
return ret;
}
* Tell people we are going down, so that they can
* prepare to death, when device is still operating.
*/
- notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
dev_deactivate(dev);
/*
* Tell people we are down
*/
- notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
return 0;
}
int err;
rtnl_lock();
- err = notifier_chain_register(&netdev_chain, nb);
+ err = blocking_notifier_chain_register(&netdev_chain, nb);
if (!err) {
for (dev = dev_base; dev; dev = dev->next) {
nb->notifier_call(nb, NETDEV_REGISTER, dev);
int err;
rtnl_lock();
- err = notifier_chain_unregister(&netdev_chain, nb);
+ err = blocking_notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
}
* @v: pointer passed unmodified to notifier function
*
* Call all network notifier blocks. Parameters and return value
- * are as for notifier_call_chain().
+ * are as for blocking_notifier_call_chain().
*/
int call_netdevice_notifiers(unsigned long val, void *v)
{
- return notifier_call_chain(&netdev_chain, val, v);
+ return blocking_notifier_call_chain(&netdev_chain, val, v);
}
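For reference, a typical consumer of the converted netdev_chain (hypothetical names). The chain is blocking, so its notifiers run in process context and may sleep; in this kernel the void pointer is the struct net_device itself:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		/* device is still operating; prepare for NETDEV_DOWN */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) also replays
 * NETDEV_REGISTER for devices that already exist (see above). */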
/* When > 0 there are consumers of rx skb time stamps */
if (dev->flags & IFF_UP &&
((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_VOLATILE)))
- notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+ blocking_notifier_call_chain(&netdev_chain,
+ NETDEV_CHANGE, dev);
if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags & IFF_PROMISC) ? +1 : -1;
else
dev->mtu = new_mtu;
if (!err && dev->flags & IFF_UP)
- notifier_call_chain(&netdev_chain,
- NETDEV_CHANGEMTU, dev);
+ blocking_notifier_call_chain(&netdev_chain,
+ NETDEV_CHANGEMTU, dev);
return err;
}
return -ENODEV;
err = dev->set_mac_address(dev, sa);
if (!err)
- notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
+ blocking_notifier_call_chain(&netdev_chain,
+ NETDEV_CHANGEADDR, dev);
return err;
}
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
- notifier_call_chain(&netdev_chain,
+ blocking_notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return 0;
write_unlock_bh(&dev_base_lock);
/* Notify protocols, that a new device appeared. */
- notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
/* Finish registration after unlock */
net_set_todo(dev);
rtnl_lock();
/* Rebroadcast unregister notification */
- notifier_call_chain(&netdev_chain,
+ blocking_notifier_call_chain(&netdev_chain,
NETDEV_UNREGISTER, dev);
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
*/
- notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
+ blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
/*
* Flush the multicast chain
static DEFINE_RWLOCK(dndev_lock);
static struct net_device *decnet_default_device;
-static struct notifier_block *dnaddr_chain;
+static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
}
rtmsg_ifa(RTM_DELADDR, ifa1);
- notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
+ blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
if (destroy) {
dn_dev_free_ifa(ifa1);
dn_db->ifa_list = ifa;
rtmsg_ifa(RTM_NEWADDR, ifa);
- notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
+ blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
return 0;
}
int register_dnaddr_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&dnaddr_chain, nb);
+ return blocking_notifier_chain_register(&dnaddr_chain, nb);
}
int unregister_dnaddr_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&dnaddr_chain, nb);
+ return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
}
#ifdef CONFIG_PROC_FS
static void rtmsg_ifa(int event, struct in_ifaddr *);
-static struct notifier_block *inetaddr_chain;
+static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy);
#ifdef CONFIG_SYSCTL
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa);
- notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
+ blocking_notifier_call_chain(&inetaddr_chain,
+ NETDEV_DOWN, ifa);
inet_free_ifa(ifa);
} else {
promote = ifa;
So that, this order is correct.
*/
rtmsg_ifa(RTM_DELADDR, ifa1);
- notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
+ blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) {
promote->ifa_flags &= ~IFA_F_SECONDARY;
rtmsg_ifa(RTM_NEWADDR, promote);
- notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote);
+ blocking_notifier_call_chain(&inetaddr_chain,
+ NETDEV_UP, promote);
for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa))
Notifier will trigger FIB update, so that
listeners of netlink will know about new ifaddr */
rtmsg_ifa(RTM_NEWADDR, ifa);
- notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
+ blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
return 0;
}
int register_inetaddr_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&inetaddr_chain, nb);
+ return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&inetaddr_chain, nb);
+ return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
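A hypothetical listener on the converted inetaddr_chain; as a blocking chain its callbacks run in process context and may sleep, and the notifier data is the affected struct in_ifaddr:

#include <linux/inetdevice.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;

	if (event == NETDEV_UP)
		printk(KERN_DEBUG "IPv4 address added on %s\n", ifa->ifa_label);
	return NOTIFY_DONE;
}

static struct notifier_block example_inetaddr_nb = {
	.notifier_call = example_inetaddr_event,
};

/* register_inetaddr_notifier(&example_inetaddr_nb); */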
/* Rename ifa_labels for a device name change. Make some effort to preserve existing
static unsigned int ip_conntrack_next_id;
static unsigned int ip_conntrack_expect_next_id;
#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
-struct notifier_block *ip_conntrack_chain;
-struct notifier_block *ip_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
{
DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
- notifier_call_chain(&ip_conntrack_chain, ecache->events,
+ atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
ip_conntrack_put(ecache->ct);
struct prefix_info *pinfo);
static int ipv6_chk_same_addr(const struct in6_addr *addr, struct net_device *dev);
-static struct notifier_block *inet6addr_chain;
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
struct ipv6_devconf ipv6_devconf = {
.forwarding = 0,
read_unlock_bh(&addrconf_lock);
if (likely(err == 0))
- notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
else {
kfree(ifa);
ifa = ERR_PTR(err);
ipv6_ifa_notify(RTM_DELADDR, ifp);
- notifier_call_chain(&inet6addr_chain,NETDEV_DOWN,ifp);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
addrconf_del_timer(ifp);
int register_inet6addr_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&inet6addr_chain, nb);
+ return atomic_notifier_chain_register(&inet6addr_chain, nb);
}
int unregister_inet6addr_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&inet6addr_chain,nb);
+ return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
}
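By contrast, inet6addr_chain is converted to an atomic chain, so its callbacks must not sleep. A hypothetical listener (names invented for illustration):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#include <net/if_inet6.h>

/* Atomic chain: may be called from non-process context, no sleeping. */
static int example_inet6addr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifp = ptr;

	if (event == NETDEV_DOWN)
		printk(KERN_DEBUG "IPv6 address removed from %s\n",
		       ifp->idev->dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_inet6addr_nb = {
	.notifier_call = example_inet6addr_event,
};

/* register_inet6addr_notifier(&example_inet6addr_nb); */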
/*
static unsigned int nf_conntrack_next_id;
static unsigned int nf_conntrack_expect_next_id;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
-struct notifier_block *nf_conntrack_chain;
-struct notifier_block *nf_conntrack_expect_chain;
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
&& ecache->events)
- notifier_call_chain(&nf_conntrack_chain, ecache->events,
+ atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
-static struct notifier_block *netlink_chain;
+static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static u32 netlink_group_mask(u32 group)
{
.protocol = sk->sk_protocol,
.pid = nlk->pid,
};
- notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
+ atomic_notifier_call_chain(&netlink_chain,
+ NETLINK_URELEASE, &n);
}
if (nlk->module)
int netlink_register_notifier(struct notifier_block *nb)
{
- return notifier_chain_register(&netlink_chain, nb);
+ return atomic_notifier_chain_register(&netlink_chain, nb);
}
int netlink_unregister_notifier(struct notifier_block *nb)
{
- return notifier_chain_unregister(&netlink_chain, nb);
+ return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
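Finally, a hypothetical consumer of the converted netlink_chain; it is an atomic chain, so the callback must not sleep. The notifier data is the struct netlink_notify describing the released socket:

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/notifier.h>

static int example_netlink_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE)
		printk(KERN_DEBUG "netlink socket released: proto %d, pid %d\n",
		       n->protocol, n->pid);
	return NOTIFY_DONE;
}

static struct notifier_block example_netlink_nb = {
	.notifier_call = example_netlink_event,
};

/* netlink_register_notifier(&example_netlink_nb); */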
static const struct proto_ops netlink_ops = {