1 #include <linux/module.h>
2 #include <linux/sched.h>
3 #include <linux/spinlock.h>
4 #include <linux/list.h>
5 #include <asm/alternative.h>
6 #include <asm/sections.h>
8 #ifdef CONFIG_HOTPLUG_CPU
/*
 * With CPU hotplug the kernel may have to re-patch SMP lock prefixes at
 * runtime (a second CPU can appear after boot), so patching cannot be
 * restricted to boot time unless explicitly requested.
 * NOTE(review): this chunk is a fragmented extract -- the body of
 * bootonly() (which presumably sets smp_alt_once = 1) is not visible.
 */
9 static int smp_alt_once;
11 static int __init bootonly(char *str)
16 __setup("smp-alt-boot", bootonly);
/*
 * Without CPU hotplug the CPU count can never change after boot, so
 * patch-once-at-boot is always correct: fold the flag to constant 1.
 * (This is the #else branch of CONFIG_HOTPLUG_CPU; the #else/#endif
 * lines themselves are missing from this extract.)
 */
18 #define smp_alt_once 1
21 static int debug_alternative;
/*
 * "debug-alternative" boot parameter: enables DPRINTK() diagnostics
 * while alternative instructions are being patched.
 * NOTE(review): only the flag-setting line of debug_alt() survives in
 * this extract; braces/return are missing.
 */
23 static int __init debug_alt(char *str)
25 debug_alternative = 1;
28 __setup("debug-alternative", debug_alt);
30 static int noreplace_smp;
/*
 * "noreplace-smp" boot parameter: when set, SMP lock prefixes are left
 * untouched (no SMP<->UP patching at all).
 * NOTE(review): the body of setup_noreplace_smp() is missing from this
 * extract; presumably it sets noreplace_smp = 1 -- confirm in full file.
 */
32 static int __init setup_noreplace_smp(char *str)
37 __setup("noreplace-smp", setup_noreplace_smp);
39 #ifdef CONFIG_PARAVIRT
/*
 * "noreplace-paravirt" boot parameter: disables patching of paravirt
 * call sites (see apply_paravirt() below, which tests this flag).
 */
40 static int noreplace_paravirt = 0;
42 static int __init setup_noreplace_paravirt(char *str)
44 noreplace_paravirt = 1;
47 __setup("noreplace-paravirt", setup_noreplace_paravirt);
/*
 * Debug printout helper for the alternatives code; emits output only
 * when the "debug-alternative" boot parameter was given.
 *
 * Wrapped in do { ... } while (0) so the macro expands to exactly one
 * statement: the previous bare "if (debug_alternative) printk(...)"
 * form silently captured the else branch when used as
 * "if (x) DPRINTK(...); else ..." (classic multi-statement macro bug).
 */
#define DPRINTK(fmt, args...)					\
	do {							\
		if (debug_alternative)				\
			printk(KERN_DEBUG fmt, args);		\
	} while (0)
54 /* Use inline assembly to define this because the nops are defined
55 as inline assembly strings in the include files and we cannot
56 get them easily into strings. */
57 asm("\t.data\nintelnops: "
58 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
59 GENERIC_NOP7 GENERIC_NOP8);
60 extern unsigned char intelnops[];
/*
 * intel_nops[len] points at one optimal nop sequence of length `len`
 * inside the byte blob above (nops of length 1..8 concatenated), so the
 * entry for length n starts at offset 1+2+...+(n-1).
 * NOTE(review): the initializer entries for lengths 0..3 are missing
 * from this fragmented extract.
 */
61 static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
66 intelnops + 1 + 2 + 3,
67 intelnops + 1 + 2 + 3 + 4,
68 intelnops + 1 + 2 + 3 + 4 + 5,
69 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
70 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/*
 * Same layout as intel_nops above, using the AMD K8-preferred nop
 * encodings.  NOTE(review): K8_NOP7/K8_NOP8 in the asm blob and the
 * first initializer entries are missing from this extract.
 */
75 asm("\t.data\nk8nops: "
76 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
78 extern unsigned char k8nops[];
79 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
85 k8nops + 1 + 2 + 3 + 4,
86 k8nops + 1 + 2 + 3 + 4 + 5,
87 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
88 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/*
 * Same layout again, using the AMD K7 (Athlon) preferred nop encodings.
 * NOTE(review): K7_NOP7/K7_NOP8 and the leading initializer entries are
 * missing from this extract.
 */
93 asm("\t.data\nk7nops: "
94 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
96 extern unsigned char k7nops[];
97 static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
103 k7nops + 1 + 2 + 3 + 4,
104 k7nops + 1 + 2 + 3 + 4 + 5,
105 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
106 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
112 extern char __vsyscall_0;
/*
 * Architecture-specific selection of the nop table.
 * NOTE(review): this extract shows the tail of the 64-bit variant (a
 * trivial find_nop_table()) followed by the 32-bit variant, which scans
 * a noptypes[] table (its declaration lines are missing here) and picks
 * the first entry whose CPUID feature flag this boot CPU has, falling
 * back to intel_nops.  The sentinel is a negative cpuid (see the loop
 * condition).  Several surrounding lines, including the #endif pairing,
 * are absent from the extract.
 */
113 static inline unsigned char** find_nop_table(void)
118 #else /* CONFIG_X86_64 */
122 unsigned char **noptable;
124 { X86_FEATURE_K8, k8_nops },
125 { X86_FEATURE_K7, k7_nops },
129 static unsigned char** find_nop_table(void)
131 unsigned char **noptable = intel_nops;
134 for (i = 0; noptypes[i].cpuid >= 0; i++) {
135 if (boot_cpu_has(noptypes[i].cpuid)) {
136 noptable = noptypes[i].noptable;
143 #endif /* CONFIG_X86_64 */
/*
 * Overwrite `len` bytes at `insns` with the best nop sequences for this
 * CPU, emitting at most ASM_NOP_MAX bytes per nop.
 * NOTE(review): the loop header and the advancement of insns/len are
 * missing from this extract -- presumably a while (len > 0) loop that
 * steps insns forward and decrements len by noplen each iteration;
 * confirm against the full file.
 */
145 static void nop_out(void *insns, unsigned int len)
147 unsigned char **noptable = find_nop_table();
150 unsigned int noplen = len;
151 if (noplen > ASM_NOP_MAX)
152 noplen = ASM_NOP_MAX;
153 memcpy(insns, noptable[noplen], noplen);
159 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
160 extern u8 *__smp_locks[], *__smp_locks_end[];
162 /* Replace instructions with better alternatives for this CPU type.
163 This runs before SMP is initialized to avoid SMP problems with
164 self modifying code. This implies that asymmetric systems where
165 APs have fewer capabilities than the boot processor are not handled.
166 Tough. Make sure you disable such features by hand. */
/*
 * Walk the alt_instr table [start, end) and, for each entry whose CPUID
 * feature bit this boot CPU has, copy the replacement instructions over
 * the original site and pad the remainder with nops.
 * NOTE(review): local declarations (a, instr, diff) and closing braces
 * are missing from this fragmented extract.
 */
168 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
174 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
175 for (a = start; a < end; a++) {
/* A replacement longer than the patch site would corrupt the
 * following instruction -- table entries must never allow it. */
176 BUG_ON(a->replacementlen > a->instrlen);
/* Skip entries whose CPU feature this processor lacks. */
177 if (!boot_cpu_has(a->cpuid))
181 /* vsyscall code is not mapped yet. resolve it manually. */
182 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
183 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
184 DPRINTK("%s: vsyscall fixup: %p => %p\n",
185 __FUNCTION__, a->instr, instr);
188 memcpy(instr, a->replacement, a->replacementlen);
189 diff = a->instrlen - a->replacementlen;
/* Pad any leftover bytes of the original slot with nops. */
190 nop_out(instr + a->replacementlen, diff);
/*
 * Restore the 0xf0 lock prefix at every recorded location in
 * [start, end) -- used when switching back to SMP-safe code.
 * NOTE(review): this extract omits the bounds check of *ptr against
 * [text, text_end) and the early-out conditions present in the full
 * function; only the store of the lock prefix byte is visible.
 */
196 static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
200 for (ptr = start; ptr < end; ptr++) {
205 **ptr = 0xf0; /* lock prefix */
/*
 * Counterpart of alternatives_smp_lock(): rewrite recorded lock-prefix
 * locations for UP operation.  NOTE(review): the entire loop body
 * (presumably nop-ing out the prefix byte) is missing from this
 * extract -- confirm against the full file.
 */
209 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
216 for (ptr = start; ptr < end; ptr++) {
/*
 * Bookkeeping for one module's (or the core kernel's) SMP-alternative
 * lock-prefix sites, kept on smp_alt_modules so the prefixes can be
 * re-patched when switching between SMP and UP at CPU hotplug time.
 * NOTE(review): most member declarations are missing from this extract.
 */
225 struct smp_alt_module {
226 /* NOTE(review): in context this comment precedes the owning-module
   field (likely "struct module *mod") -- declaration not visible here. */
230 /* ptrs to lock prefixes */
234 /* .text segment, needed to avoid patching init code ;) */
238 struct list_head next;
/* Global registry of modules with patchable lock prefixes, guarded by
 * the smp_alt spinlock below. */
240 static LIST_HEAD(smp_alt_modules);
241 static DEFINE_SPINLOCK(smp_alt);
/*
 * Register a module's lock-prefix sites for later SMP<->UP re-patching.
 * If patching is one-shot (smp_alt_once) the sites are patched for the
 * current mode immediately and NOT recorded; otherwise an entry is
 * allocated and added to smp_alt_modules under the smp_alt lock.
 * NOTE(review): this extract is fragmented -- the early-return guard
 * around the first unlock path, the kzalloc failure branch's printk,
 * and several field assignments (mod, name, locks, text) are missing.
 */
243 void alternatives_smp_module_add(struct module *mod, char *name,
244 void *locks, void *locks_end,
245 void *text, void *text_end)
247 struct smp_alt_module *smp;
/* One-shot path: if this system is effectively UP, drop the lock
 * prefixes now; nothing is recorded for later switching. */
254 if (boot_cpu_has(X86_FEATURE_UP))
255 alternatives_smp_unlock(locks, locks_end,
260 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
262 return; /* we'll run the (safe but slow) SMP code then ... */
267 smp->locks_end = locks_end;
269 smp->text_end = text_end;
270 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
271 __FUNCTION__, smp->locks, smp->locks_end,
272 smp->text, smp->text_end, smp->name);
274 spin_lock_irqsave(&smp_alt, flags);
275 list_add_tail(&smp->next, &smp_alt_modules);
/* If we are currently running UP code, bring the new module's sites
 * in line with that state right away. */
276 if (boot_cpu_has(X86_FEATURE_UP))
277 alternatives_smp_unlock(smp->locks, smp->locks_end,
278 smp->text, smp->text_end);
279 spin_unlock_irqrestore(&smp_alt, flags);
/*
 * Unregister a module from the SMP-alternatives list (module unload).
 * No-op when patching was one-shot or disabled, since nothing was
 * recorded in that case.
 * NOTE(review): fragmented extract -- the `continue` for non-matching
 * entries, the kfree of the found item, and the returns are missing.
 */
282 void alternatives_smp_module_del(struct module *mod)
284 struct smp_alt_module *item;
287 if (smp_alt_once || noreplace_smp)
290 spin_lock_irqsave(&smp_alt, flags);
291 list_for_each_entry(item, &smp_alt_modules, next) {
292 if (mod != item->mod)
/* Found: unlink, then drop the lock before the (sleeping-unsafe
 * anyway) printout and presumably kfree -- confirm in full file. */
294 list_del(&item->next);
295 spin_unlock_irqrestore(&smp_alt, flags);
296 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
/* Not found: fall through and release the lock. */
300 spin_unlock_irqrestore(&smp_alt, flags);
/*
 * Switch every registered module's lock prefixes between SMP (smp != 0)
 * and UP (smp == 0) variants, and flip the X86_FEATURE_UP capability
 * bit accordingly.  Called at CPU hotplug transitions.
 * NOTE(review): fragmented extract -- the lockdep early return, the
 * "already in requested mode" check, and the if/else around the two
 * printk branches are missing; the structure below is inferred from the
 * surviving lines.
 */
303 void alternatives_smp_switch(int smp)
305 struct smp_alt_module *mod;
308 #ifdef CONFIG_LOCKDEP
310 * A not yet fixed binutils section handling bug prevents
311 * alternatives-replacement from working reliably, so turn
314 printk("lockdep: not fixing up alternatives.\n");
318 if (noreplace_smp || smp_alt_once)
/* Switching to UP while >1 CPU is online would be unsafe
 * self-modification under running CPUs. */
320 BUG_ON(!smp && (num_online_cpus() > 1));
322 spin_lock_irqsave(&smp_alt, flags);
/* SMP branch: clear the UP feature bit and restore lock prefixes. */
324 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
325 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
326 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
327 list_for_each_entry(mod, &smp_alt_modules, next)
328 alternatives_smp_lock(mod->locks, mod->locks_end,
329 mod->text, mod->text_end);
/* UP branch: set the UP feature bit and remove lock prefixes. */
331 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
332 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
333 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
334 list_for_each_entry(mod, &smp_alt_modules, next)
335 alternatives_smp_unlock(mod->locks, mod->locks_end,
336 mod->text, mod->text_end);
338 spin_unlock_irqrestore(&smp_alt, flags);
343 #ifdef CONFIG_PARAVIRT
/*
 * Patch each paravirt call site in [start, end): ask paravirt_ops to
 * emit the best code for this hypervisor/native setup, then nop-pad the
 * unused tail of the site.  Honors the "noreplace-paravirt" parameter.
 * NOTE(review): fragmented extract -- the `used` declaration, the final
 * patch() argument (site length), closing braces, and the sync_core()
 * the trailing comment refers to are missing.
 */
344 void apply_paravirt(struct paravirt_patch_site *start,
345 struct paravirt_patch_site *end)
347 struct paravirt_patch_site *p;
349 if (noreplace_paravirt)
352 for (p = start; p < end; p++) {
355 used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
/* The patcher must never emit more than the site can hold. */
358 BUG_ON(used > p->len);
360 /* Pad the rest with nops */
361 nop_out(p->instr + used, p->len - used);
364 /* Sync to be conservative, in case we patched following
368 extern struct paravirt_patch_site __start_parainstructions[],
369 __stop_parainstructions[];
370 #endif /* CONFIG_PARAVIRT */
/*
 * Boot-time entry point: apply all instruction alternatives for this
 * CPU, then either do one-shot UP patching (when the CPU count can
 * never grow) and free the lock tables, or register the core kernel's
 * sites for later SMP<->UP switching.  Runs with IRQs disabled since
 * it self-modifies live kernel text.
 * NOTE(review): fragmented extract -- the `flags` declaration, the
 * assignment forcing smp_alt_once when num_possible_cpus() < 2, the
 * if (smp_alt_once) / else structure, and several argument lines
 * (_text/_etext) are missing; structure below inferred from survivors.
 */
372 void __init alternative_instructions(void)
376 local_irq_save(flags);
377 apply_alternatives(__alt_instructions, __alt_instructions_end);
379 /* switch to patch-once-at-boottime-only mode and free the
380 * tables in case we know the number of CPUs will never ever
382 #ifdef CONFIG_HOTPLUG_CPU
383 if (num_possible_cpus() < 2)
/* One-shot path: patch for UP now and throw the tables away. */
389 if (1 == num_possible_cpus()) {
390 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
391 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
392 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
393 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
396 free_init_pages("SMP alternatives",
397 (unsigned long)__smp_locks,
398 (unsigned long)__smp_locks_end);
/* Re-patchable path: register the core kernel like a module and
 * start out in UP mode (a single CPU is running at this point). */
400 alternatives_smp_module_add(NULL, "core kernel",
401 __smp_locks, __smp_locks_end,
403 alternatives_smp_switch(0);
406 apply_paravirt(__parainstructions, __parainstructions_end);
407 local_irq_restore(flags);