/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	/* Store fp registers 0, 2, 4 and 6, present on all machines. */
	asm volatile (
		"   std   0,8(%1)\n"
		"   std   2,24(%1)\n"
		"   std   4,40(%1)\n"
		"   std   6,56(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
	if (!MACHINE_HAS_IEEE)
		return;
	/* On IEEE machines also store the fp control register and the
	   remaining fp registers 1, 3, 5 and 7-15. */
	asm volatile (
		"   stfpc 0(%1)\n"
		"   std   1,16(%1)\n"
		"   std   3,32(%1)\n"
		"   std   5,48(%1)\n"
		"   std   7,64(%1)\n"
		"   std   8,72(%1)\n"
		"   std   9,80(%1)\n"
		"   std   10,88(%1)\n"
		"   std   11,96(%1)\n"
		"   std   12,104(%1)\n"
		"   std   13,112(%1)\n"
		"   std   14,120(%1)\n"
		"   std   15,128(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	/* Load fp registers 0, 2, 4 and 6, present on all machines. */
	asm volatile (
		"   ld    0,8(%0)\n"
		"   ld    2,24(%0)\n"
		"   ld    4,40(%0)\n"
		"   ld    6,56(%0)"
		: : "a" (fpregs), "m" (*fpregs) );
	if (!MACHINE_HAS_IEEE)
		return;
	/* On IEEE machines also load the fp control register and the
	   remaining fp registers 1, 3, 5 and 7-15. */
	asm volatile (
		"   lfpc  0(%0)\n"
		"   ld    1,16(%0)\n"
		"   ld    3,32(%0)\n"
		"   ld    5,48(%0)\n"
		"   ld    7,64(%0)\n"
		"   ld    8,72(%0)\n"
		"   ld    9,80(%0)\n"
		"   ld    10,88(%0)\n"
		"   ld    11,96(%0)\n"
		"   ld    12,104(%0)\n"
		"   ld    13,112(%0)\n"
		"   ld    14,120(%0)\n"
		"   ld    15,128(%0)"
		: : "a" (fpregs), "m" (*fpregs) );
}
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}
#define switch_to(prev,next,last) do {				\
	if (prev == next)					\
		break;						\
	save_fp_regs(&prev->thread.fp_regs);			\
	restore_fp_regs(&next->thread.fp_regs);			\
	save_access_regs(&prev->thread.acrs[0]);		\
	restore_access_regs(&next->thread.acrs[0]);		\
	prev = __switch_to(prev,next);				\
} while (0)
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif
#define finish_arch_switch(prev) do {				\
	set_fs(current->thread.mm_segment);			\
	account_vtime(prev);					\
} while (0)

#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		/* Round the address down to a word boundary and shift the
		   byte value into its position within that word; cs can
		   only operate on aligned words. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 4:
		asm volatile (
			"    l   %0,0(%3)\n"
			"0:  cs  %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) ptr)
			: "d" (x), "a" (ptr), "m" (*(int *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    lg  %0,0(%3)\n"
			"0:  csg %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "a" (ptr), "m" (*(long *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#endif /* __s390x__ */
	}
	return x;
}
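
/*
 * Illustrative sketch, not part of the original header: a typical use
 * of xchg(). The function and variable names are invented for the
 * example. xchg() atomically stores the new value and returns the
 * previous one, so a pending-work counter can be taken over in one step.
 */
static inline unsigned int example_xchg_take(unsigned int *pending)
{
	/* Atomically fetch the current count and reset it to zero. */
	return xchg(pending, 0);
}
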
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* cs requires word alignment, so address the word that
		   contains the byte (addr, not ptr) and retry as long as
		   only the other bytes of the word change under us. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(255 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(65535 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 4:
		asm volatile (
			"    cs  %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    csg %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#endif /* __s390x__ */
	}
	return old;
}
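
/*
 * Illustrative sketch, not part of the original header: the classic
 * cmpxchg() retry loop, here adding to a counter without a lock. The
 * function name is invented for the example. Success is detected by
 * comparing the value returned by cmpxchg() with the expected old value.
 */
static inline void example_atomic_add(unsigned long *counter, unsigned long n)
{
	unsigned long old;

	do {
		old = *counter;
		/* retry if another cpu changed *counter in between */
	} while (cmpxchg(counter, old, old + n) != old);
}
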
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
#define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
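
/*
 * Illustrative sketch, not part of the original header: the usual
 * producer/consumer pairing of the barrier macros above. The producer
 * makes its payload visible before setting the flag; the consumer
 * orders the flag read before the data read. Names are invented.
 */
static inline void example_publish(int *data, int *flag, int value)
{
	*data = value;	/* make the payload visible ... */
	wmb();		/* ... before the flag is set */
	*flag = 1;
}

static inline int example_consume(int *data, int *flag)
{
	if (!*flag)
		return -1;
	rmb();		/* order the flag read before the data read */
	return *data;
}
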
/* interrupt control.. */
#define local_irq_enable() ({ \
	unsigned long __dummy; \
	__asm__ __volatile__ ( \
		"stosm 0(%1),0x03" \
		: "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
	})

#define local_irq_disable() ({ \
	unsigned long __flags; \
	__asm__ __volatile__ ( \
		"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
	__flags; \
	})

#define local_save_flags(x) \
	__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )

#define local_irq_restore(x) \
	__asm__ __volatile__("ssm   0(%0)" : : "a" (&x), "m" (x) : "memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!((flags >> __FLAG_SHIFT) & 3);	\
})
/*
 * The ex instruction patches the register fields of the stctg/lctlg
 * templates below: (low<<4)+high selects the control register range
 * and cr*17 places cr in both nibbles of the R1/R3 field.
 */
#ifdef __s390x__

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctlg 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctg */ \
		"    lg    0,0(%1)\n" \
		"    ogr   0,%3\n"       /* set the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1L<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctg */ \
		"    lg    0,0(%1)\n" \
		"    ngr   0,%3\n"       /* clear the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1L<<(bit))) \
		: "cc", "0", "1" ); \
	})
#else /* __s390x__ */
#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctl 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctl 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    or    0,%3\n"       /* set the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    nr    0,%3\n"       /* clear the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1<<(bit))) \
		: "cc", "0", "1" ); \
	})
#endif /* __s390x__ */

/* For spinlocks etc */
#define local_irq_save(x) ((x) = local_irq_disable())
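
/*
 * Illustrative sketch, not part of the original header: the usual
 * pattern for a short critical section with the macros above.
 * local_irq_save() keeps the old PSW mask in flags, so the sequence
 * also works if interrupts were already disabled. The function and
 * the touched variable are invented for the example.
 */
static inline void example_protected_inc(unsigned long *val)
{
	unsigned long flags;

	local_irq_save(flags);
	(*val)++;		/* cannot be interrupted on this cpu */
	local_irq_restore(flags);
}
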
/*
 * Use to set the psw mask, except for the first byte, which
 * is left unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	/* stosm overwrites only the first byte of mask with the current
	   system mask, so the interrupt state is carried over. */
	local_save_flags(mask);
	__load_psw_mask(mask);
}

#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else /* CONFIG_SMP */

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
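
/*
 * Illustrative sketch, not part of the original header: ctl_set_bit()
 * and ctl_clear_bit() toggle a control register bit on every cpu on
 * SMP kernels and only locally on UP. The register/bit pair below is
 * a placeholder for the example, not a meaningful setting:
 *
 *	ctl_set_bit(0, 17);	(hypothetical: set bit 17 in CR0)
 *	ctl_clear_bit(0, 17);	(and clear it again)
 */
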
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif