diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 912a3a17b9..4e91ee1e37 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -22,6 +22,32 @@
 
 DECLARE_PER_CPU(struct x8664_pda, pda);
 
+/*
+ * These are supposed to be implemented as a single instruction which
+ * operates on the per-cpu data base segment.  x86-64 doesn't have
+ * that yet, so this is a fairly inefficient workaround for the
+ * meantime.  The single instruction is atomic with respect to
+ * preemption and interrupts, so ideally we would disable both here
+ * to achieve the same effect.  However, because these accessors can
+ * be used from within interrupt-disable/enable sequences, we can't
+ * disable interrupts; disabling preemption is enough.
+ */
+#define x86_read_percpu(var)					\
+	({							\
+		typeof(per_cpu_var(var)) __tmp;			\
+		preempt_disable();				\
+		__tmp = __get_cpu_var(var);			\
+		preempt_enable();				\
+		__tmp;						\
+	})
+
+#define x86_write_percpu(var, val)				\
+	do {							\
+		preempt_disable();				\
+		__get_cpu_var(var) = (val);			\
+		preempt_enable();				\
+	} while (0)
+
 #else /* CONFIG_X86_64 */
 
 #ifdef __ASSEMBLY__
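
For reference, the "single instruction" the comment alludes to is what 32-bit x86
already does elsewhere in this header: a per-cpu access compiles to one
segment-relative mov, which neither preemption nor an interrupt can split.
Below is a simplified sketch of that form; the macro name is illustrative,
and the real 32-bit helper (percpu_from_op) dispatches on operand size
rather than hard-coding movl.

/*
 * Sketch only, not part of this patch: roughly what a 32-bit,
 * single-instruction per-cpu read looks like.  The %fs base points
 * at the current CPU's per-cpu area and is switched on context
 * switch, so the one mov is inherently migration-safe.
 */
#define x86_read_percpu_sketch(var)				\
	({							\
		typeof(per_cpu__##var) ret__;			\
		asm("movl %%fs:%1, %0"				\
		    : "=r" (ret__)				\
		    : "m" (per_cpu__##var));			\
		ret__;						\
	})

Because the access is one instruction, it is atomic with respect to both
preemption and interrupts; that is the property the x86-64 workaround in
this patch emulates with preempt_disable()/preempt_enable().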
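A minimal usage sketch of the new accessors, assuming a hypothetical per-cpu
variable (the names below are illustrative and do not appear in the patch):

DEFINE_PER_CPU(unsigned long, demo_count);	/* hypothetical variable */

static void demo_set(unsigned long v)
{
	/* a single store; the accessor pins us to one CPU around it */
	x86_write_percpu(demo_count, v);
}

static unsigned long demo_get(void)
{
	/* a single load, likewise safe without explicit locking */
	return x86_read_percpu(demo_count);
}

Note that each accessor protects only one load or one store.  A
read-modify-write spanning both calls, such as
x86_write_percpu(demo_count, x86_read_percpu(demo_count) + 1), can migrate
CPUs between the two accesses and would need its own
preempt_disable()/preempt_enable() bracket around the whole sequence.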