/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
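
/*
 * stosm/stnsm (see the interrupt control macros below) store the PSW
 * system-mask byte at the given address, i.e. into the most significant
 * byte of a big-endian unsigned long.  __FLAG_SHIFT is the right shift
 * that brings this byte down to bit 0: 56 for an 8-byte long on 64 bit,
 * 24 for a 4-byte long on 31 bit.  A sketch of how irqs_disabled()
 * below uses it:
 *
 *	unsigned long flags;
 *	local_save_flags(flags);
 *	io_and_ext_enabled = (flags >> __FLAG_SHIFT) & 3;
 */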

#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	/* Store the four basic floating point registers (0, 2, 4, 6). */
	asm volatile (
		"   std   0,8(%1)\n"
		"   std   2,24(%1)\n"
		"   std   4,40(%1)\n"
		"   std   6,56(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
	if (!MACHINE_HAS_IEEE)
		return;
	/* Store the FP control register and the remaining 12 registers. */
	asm volatile (
		"   stfpc 0(%1)\n"
		"   std   1,16(%1)\n"
		"   std   3,32(%1)\n"
		"   std   5,48(%1)\n"
		"   std   7,64(%1)\n"
		"   std   8,72(%1)\n"
		"   std   9,80(%1)\n"
		"   std   10,88(%1)\n"
		"   std   11,96(%1)\n"
		"   std   12,104(%1)\n"
		"   std   13,112(%1)\n"
		"   std   14,120(%1)\n"
		"   std   15,128(%1)\n"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	/* Load the four basic floating point registers (0, 2, 4, 6). */
	asm volatile (
		"   ld    0,8(%0)\n"
		"   ld    2,24(%0)\n"
		"   ld    4,40(%0)\n"
		"   ld    6,56(%0)"
		: : "a" (fpregs), "m" (*fpregs) );
	if (!MACHINE_HAS_IEEE)
		return;
	/* Load the FP control register and the remaining 12 registers. */
	asm volatile (
		"   lfpc  0(%0)\n"
		"   ld    1,16(%0)\n"
		"   ld    3,32(%0)\n"
		"   ld    5,48(%0)\n"
		"   ld    7,64(%0)\n"
		"   ld    8,72(%0)\n"
		"   ld    9,80(%0)\n"
		"   ld    10,88(%0)\n"
		"   ld    11,96(%0)\n"
		"   ld    12,104(%0)\n"
		"   ld    13,112(%0)\n"
		"   ld    14,120(%0)\n"
		"   ld    15,128(%0)\n"
		: : "a" (fpregs), "m" (*fpregs) );
}

static inline void save_access_regs(unsigned int *acrs)
{
	/* Store access registers 0-15 (64 bytes) at acrs. */
	asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}

static inline void restore_access_regs(unsigned int *acrs)
{
	/* Load access registers 0-15 from acrs. */
	asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}

#define switch_to(prev,next,last) do {				\
	if (prev == next)					\
		break;						\
	save_fp_regs(&prev->thread.fp_regs);			\
	restore_fp_regs(&next->thread.fp_regs);			\
	save_access_regs(&prev->thread.acrs[0]);		\
	restore_access_regs(&next->thread.acrs[0]);		\
	prev = __switch_to(prev,next);				\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#endif

#define finish_arch_switch(prev) do {				\
	set_fs(current->thread.mm_segment);			\
	account_system_vtime(prev);				\
} while (0)

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))
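
/*
 * Illustrative use (hypothetical flag word, not part of this header):
 *
 *	unsigned int old = xchg(&flag_word, 1);
 *
 * old receives the previous contents of flag_word; the load of the old
 * value and the store of the new one happen as a single atomic update.
 */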

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		/* Align down to the enclosing word; shift selects the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 2:
		/* Align down to the enclosing word; shift selects the halfword. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  lr  0,%0\n"
			"    nr  0,%3\n"
			"    or  0,%2\n"
			"    cs  %0,0,0(%4)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 4:
		asm volatile (
			"    l   %0,0(%3)\n"
			"0:  cs  %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(int *) ptr)
			: "d" (x), "a" (ptr), "m" (*(int *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    lg  %0,0(%3)\n"
			"0:  csg %0,%2,0(%3)\n"
			"    jl  0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "a" (ptr), "m" (*(long *) ptr)
			: "memory", "cc" );
		x = old;
		break;
#endif /* __s390x__ */
	}
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
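
/*
 * Illustrative use (hypothetical lock word, not part of this header):
 *
 *	if (cmpxchg(&lock_word, 0, 1) == 0) {
 *		... lock_word was 0 and has atomically become 1 ...
 *	}
 */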

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* Align down to the enclosing word; shift selects the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(255 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 2:
		/* Align down to the enclosing word; shift selects the halfword. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"    l   %0,0(%4)\n"
			"0:  nr  %0,%5\n"
			"    lr  %1,%0\n"
			"    or  %0,%2\n"
			"    or  %1,%3\n"
			"    cs  %0,%1,0(%4)\n"
			"    jnl 1f\n"
			"    xr  %1,%0\n"
			"    nr  %1,%5\n"
			"    jnz 0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(65535 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 4:
		asm volatile (
			"    cs  %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile (
			"    csg %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#endif /* __s390x__ */
	}
	return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	__asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
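
/*
 * Illustrative pairing (hypothetical producer/consumer, not part of
 * this header):
 *
 *	CPU 0			CPU 1
 *	data = compute();	while (!flag)
 *	smp_wmb();			;
 *	flag = 1;		smp_rmb();
 *				consume(data);
 *
 * The write barrier orders the data store before the flag store; the
 * matching read barrier orders the flag read before the data read.
 */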

/* interrupt control.. */
#define local_irq_enable() ({ \
	unsigned long __dummy; \
	__asm__ __volatile__ ( \
		"stosm 0(%1),0x03" \
		: "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
	})

#define local_irq_disable() ({ \
	unsigned long __flags; \
	__asm__ __volatile__ ( \
		"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
	__flags; \
	})

#define local_save_flags(x) \
	__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )

#define local_irq_restore(x) \
	__asm__ __volatile__("ssm   0(%0)" : : "a" (&x), "m" (x) : "memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!((flags >> __FLAG_SHIFT) & 3);	\
})
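
/*
 * Typical pattern (illustrative only, hypothetical critical section):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted ...
 *	local_irq_restore(flags);
 *
 * local_irq_save() is defined further down in this file as
 * ((x) = local_irq_disable()).
 */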

#ifdef __s390x__

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctlg 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctg */ \
		"    lg    0,0(%1)\n" \
		"    ogr   0,%3\n"       /* set the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1L<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctg */ \
		"    lg    0,0(%1)\n" \
		"    ngr   0,%3\n"       /* clear the bit */ \
		"    stg   0,0(%1)\n" \
		"1:  ex    %2,6(1)"      /* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1L<<(bit))) \
		: "cc", "0", "1" ); \
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctl 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)), \
		    "m" (*(addrtype *)(array)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctl 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (*(addrtype *)(array)) \
		: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    or    0,%3\n"       /* set the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    l     0,0(%1)\n" \
		"    nr    0,%3\n"       /* clear the bit */ \
		"    st    0,0(%1)\n" \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1<<(bit))) \
		: "cc", "0", "1" ); \
	})

#endif /* __s390x__ */

/* For spinlocks etc */
#define local_irq_save(x)	((x) = local_irq_disable())

/*
 * Use to set psw mask except for the first byte which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	/* stosm overwrites the high byte of mask with the current
	   system mask, so the first byte of the PSW mask is kept. */
	local_save_flags(mask);
	__load_psw_mask(mask);
}

#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
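
/*
 * Illustrative call (hypothetical control register and bit number):
 *
 *	ctl_set_bit(0, 17);
 *
 * On SMP kernels this resolves to smp_ctl_set_bit(), which applies the
 * change on all CPUs; on UP it patches the local control register only.
 */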

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */