x86, 64-bit: add sync_cmpxchg
author    Jeremy Fitzhardinge <jeremy@goop.org>
          Wed, 25 Jun 2008 04:19:10 +0000 (00:19 -0400)
committer Ingo Molnar <mingo@elte.hu>
          Tue, 8 Jul 2008 11:10:58 +0000 (13:10 +0200)
Add sync_cmpxchg to match 32-bit's sync_cmpxchg.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
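
The new 64-bit macro has the same shape as the existing 32-bit sync_cmpxchg(): it compare-and-exchanges a 1-, 2- or 4-byte word and returns the previous value, always with a lock prefix. A minimal sketch of a caller, assuming a kernel context; the struct, field and helper names below are made up for illustration and are not part of this patch:

#include <asm/cmpxchg.h>

/* One word in a page shared with the hypervisor (hypothetical layout). */
struct shared_slot {
	unsigned int owner;	/* 0 means free; both guest and hypervisor may write it */
};

/* Try to claim the slot; returns 1 on success, 0 if it was already taken. */
static int claim_shared_slot(struct shared_slot *slot, unsigned int my_id)
{
	/*
	 * sync_cmpxchg() always emits a LOCK-prefixed cmpxchg, so the update
	 * stays atomic even if this kernel was built for UP while the
	 * machine underneath is SMP.
	 */
	return sync_cmpxchg(&slot->owner, 0, my_id) == 0;
}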
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index d9b26b9a28cf9f0e3f64f4206f1922b331408f54..17463ccf816618b59a144ec76f57e51589e8866c 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -93,6 +93,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+                                          unsigned long old,
+                                          unsigned long new, int size)
+{
+       unsigned long prev;
+       switch (size) {
+       case 1:
+               asm volatile("lock; cmpxchgb %b1,%2"
+                            : "=a"(prev)
+                            : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+               asm volatile("lock; cmpxchgw %w1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+               asm volatile("lock; cmpxchgl %1,%2"
+                            : "=a"(prev)
+                            : "r"(new), "m"(*__xg(ptr)), "0"(old)
+                            : "memory");
+               return prev;
+       }
+       return old;
+}
+
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
@@ -139,6 +172,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                                             (unsigned long)(n),        \
                                             sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n)                                                \
+       ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),  \
+                                           (unsigned long)(n),         \
+                                           sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n)                                     \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
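
As with cmpxchg(), callers that need an arbitrary update rather than a single swap typically wrap sync_cmpxchg() in a retry loop. A sketch of that pattern, again illustrative only; shared_add() is not part of the patch:

/* Atomically add delta to a word in hypervisor-shared memory. */
static unsigned int shared_add(unsigned int *counter, unsigned int delta)
{
	unsigned int old, prev;

	do {
		old = *counter;
		prev = sync_cmpxchg(counter, old, old + delta);
	} while (prev != old);	/* the word changed under us; try again */

	return old + delta;
}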