#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__

#include <linux/stringify.h>
#include <asm/feature-fixups.h>

#ifndef __ASSEMBLY__
/*
 * Bounds of the lwsync fixup table; entries are emitted by
 * MAKE_LWSYNC_SECTION_ENTRY below and patched at boot by
 * do_lwsync_fixups() on cores (e.g. e500) that lack lwsync.
 */
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
			     void *fixup_end);

/* eieio: enforce ordering of accesses (notably to device memory). */
static inline void eieio(void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}

/* isync: context-synchronizing; discards prefetched instructions. */
static inline void isync(void)
{
	__asm__ __volatile__ ("isync" : : : "memory");
}
#endif /* __ASSEMBLY__ */

/*
 * LWSYNC is the lightweight sync on 64-bit parts and falls back to a
 * full sync elsewhere.  On e500 the sync site is recorded in the
 * __lwsync_fixup section so it can be patched at runtime.
 */
#if defined(__powerpc64__)
#    define LWSYNC	lwsync
#elif defined(CONFIG_E500)
#    define LWSYNC					\
	START_LWSYNC_SECTION(96);			\
	sync;						\
	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
#else
#    define LWSYNC	sync
#endif

/*
 * Arguably the bitops and *xchg operations don't imply any memory barrier
 * or SMP ordering, but in fact a lot of drivers expect them to imply
 * both, since they do on x86 cpus.
 */
#ifdef CONFIG_SMP
#define ISYNC_ON_SMP	"\n\tisync\n"
#define LWSYNC_ON_SMP	stringify_in_c(LWSYNC) "\n"
#else
#define ISYNC_ON_SMP
#define LWSYNC_ON_SMP
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */