}
EXPORT_SYMBOL(smp_call_function_single);
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpus call @func.
+ *
+ * You must not call this function with interrupts disabled, nor from a
+ * hardware interrupt handler or a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
+{
+ preempt_disable();
+ cpu_clear(smp_processor_id(), mask);
+ __smp_call_function_map(func, info, 0, wait, mask);
+ preempt_enable();
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
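As a usage illustration (not part of the patch): a minimal, hedged sketch of a caller of the new interface. The callback and trigger site are hypothetical; only smp_call_function_mask() itself comes from this patch. Per the comment above, the caller runs with interrupts enabled and removes itself from the mask:

	/* Hypothetical example: run a fast, non-blocking callback on every
	 * other online cpu and wait for completion on all of them. */
	static void example_func(void *info)
	{
		/* Runs on each target cpu; must not block or sleep. */
	}

	static void example_call_other_cpus(void)
	{
		cpumask_t mask = cpu_online_map;

		cpu_clear(smp_processor_id(), mask);	/* documented requirement */
		smp_call_function_mask(mask, example_func, NULL, 1);
	}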
void smp_send_stop(void)
{
int cpu, rc;
out:
kfree(info);
printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
- lock_cpu_hotplug();
+ get_online_cpus();
smp_rescan_cpus();
- unlock_cpu_hotplug();
+ put_online_cpus();
}
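For reference, the lock_cpu_hotplug()/unlock_cpu_hotplug() pairs replaced throughout this patch map one-to-one onto get_online_cpus()/put_online_cpus(); the semantics are unchanged, only the interface was renamed in the hotplug refcount rework. A minimal sketch of the idiom (the loop body is illustrative only):

	int cpu;

	get_online_cpus();		/* keep cpus from going on/offline */
	for_each_online_cpu(cpu)
		pr_debug("cpu %d is online\n", cpu);
	put_online_cpus();		/* drop the hotplug reference */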
/*
if (!lowcore)
return -ENOMEM;
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
- if (!async_stack)
- goto out_async_stack;
panic_stack = __get_free_page(GFP_KERNEL);
- if (!panic_stack)
- goto out_panic_stack;
-
- *lowcore = S390_lowcore;
+ if (!panic_stack || !async_stack)
+ goto out;
+ /*
+ * Only the first 512 bytes from address 0 need to be copied, but the
+ * compiler emits a warning if the memcpy source is NULL, so use
+ * copy_page() instead. It copies more than needed, but this code is
+ * not performance critical.
+ */
+ copy_page(lowcore, &S390_lowcore);
+ memset((void *)lowcore + 512, 0, sizeof(*lowcore) - 512);
lowcore->async_stack = async_stack + ASYNC_SIZE;
lowcore->panic_stack = panic_stack + PAGE_SIZE;
out_save_area:
free_page(panic_stack);
#endif
-out_panic_stack:
+out:
free_pages(async_stack, ASYNC_ORDER);
-out_async_stack:
free_pages((unsigned long) lowcore, lc_order);
return -ENOMEM;
}
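The merged error label works because free_pages() and free_page() treat an address of 0 as a no-op, so the single out: path can free both allocations unconditionally. A standalone sketch of the idiom (identifiers hypothetical):

	static int example_alloc(void)
	{
		unsigned long stack_a, stack_b;

		stack_a = __get_free_pages(GFP_KERNEL, 1);
		stack_b = __get_free_page(GFP_KERNEL);
		if (!stack_a || !stack_b)
			goto out;
		/* ... hand the pages to their user and return ... */
		return 0;
	out:
		free_pages(stack_a, 1);	/* no-op if stack_a == 0 */
		free_page(stack_b);	/* no-op if stack_b == 0 */
		return -ENOMEM;
	}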
cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
cpu_lowcore->current_task = (unsigned long) idle;
cpu_lowcore->cpu_data.cpu_nr = cpu;
- cpu_lowcore->softirq_pending = 0;
- cpu_lowcore->ext_call_fast = 0;
+ cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
+ cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
eieio();
while (signal_processor(cpu, sigp_restart) == sigp_busy)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+#ifndef CONFIG_64BIT
+ unsigned long save_area = 0;
+#endif
+ unsigned long async_stack, panic_stack;
+ struct _lowcore *lowcore;
unsigned int cpu;
+ int lc_order;
smp_detect_cpus();
/* request the 0x1201 emergency signal external interrupt */
if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
panic("Couldn't request external interrupt 0x1201");
- memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
print_cpu_info(&S390_lowcore.cpu_data);
- smp_alloc_lowcore(smp_processor_id());
+ /* Reallocate current lowcore, but keep its contents. */
+ lc_order = sizeof(long) == 8 ? 1 : 0;
+ lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+ panic_stack = __get_free_page(GFP_KERNEL);
+ async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
- ctl_set_bit(14, 29); /* enable extended save area */
+ save_area = get_zeroed_page(GFP_KERNEL);
#endif
- set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
-
+ local_irq_disable();
+ local_mcck_disable();
+ lowcore_ptr[smp_processor_id()] = lowcore;
+ *lowcore = S390_lowcore;
+ lowcore->panic_stack = panic_stack + PAGE_SIZE;
+ lowcore->async_stack = async_stack + ASYNC_SIZE;
+#ifndef CONFIG_64BIT
+ if (MACHINE_HAS_IEEE)
+ lowcore->extended_save_area_addr = (u32) save_area;
+#endif
+ set_prefix((u32)(unsigned long) lowcore);
+ local_mcck_enable();
+ local_irq_enable();
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
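Design note on the hunk above: the prefix register tells the hardware where this cpu's lowcore lives, and the lowcore holds the PSWs used for interrupt and machine check delivery. That is why both interrupts and machine checks are disabled across the lowcore copy and the set_prefix() call, and re-enabled only once the new lowcore is fully populated.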
return -EINVAL;
mutex_lock(&smp_cpu_state_mutex);
- lock_cpu_hotplug();
+ get_online_cpus();
rc = -EBUSY;
if (cpu_online(cpu))
goto out;
break;
}
out:
- unlock_cpu_hotplug();
+ put_online_cpus();
mutex_unlock(&smp_cpu_state_mutex);
return rc ? rc : count;
}
.notifier_call = smp_cpu_notify,
};
-static int smp_add_present_cpu(int cpu)
+static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
}
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t rescan_store(struct sys_device *dev, const char *buf,
- size_t count)
+static ssize_t __ref rescan_store(struct sys_device *dev,
+ const char *buf, size_t count)
{
cpumask_t newcpus;
int cpu;
int rc;
mutex_lock(&smp_cpu_state_mutex);
- lock_cpu_hotplug();
+ get_online_cpus();
newcpus = cpu_present_map;
rc = smp_rescan_cpus();
if (rc)
}
rc = 0;
out:
- unlock_cpu_hotplug();
+ put_online_cpus();
mutex_unlock(&smp_cpu_state_mutex);
return rc ? rc : count;
}
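Usage note, assuming rescan_store() is wired up as in mainline s390 (as a sysdev class attribute on the cpu class, which this excerpt does not show): a rescan would then be triggered from userspace by writing to the attribute, e.g. "echo 1 > /sys/devices/system/cpu/rescan".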