[linux-2.6] include/asm-i386/mutex.h
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)                           \
do {                                                                    \
        unsigned int dummy;                                             \
                                                                        \
        typecheck(atomic_t *, count);                                   \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   decl (%%eax)        \n"                     \
                        "   js "#fail_fn"       \n"                     \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
                : "memory", "ecx", "edx");                              \
} while (0)
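/*
 * For illustration, a sketch of how the generic layer is expected to
 * use this fastpath: the slowpath handler is passed in as fail_fn,
 * roughly as kernel/mutex.c does (caller names may vary by version):
 *
 *      void fastcall mutex_lock(struct mutex *lock)
 *      {
 *              might_sleep();
 *              __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *      }
 *
 * The uncontended case thus costs one locked decl plus an untaken
 * conditional jump; the js branch into fail_fn is only taken when the
 * count went negative, i.e. the lock was already held.
 */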
/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
                             int fastcall (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else
                return 0;
}
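/*
 * For illustration, a sketch of the expected caller: the generic layer
 * uses this retval variant where the slowpath itself may fail and
 * return an errno, roughly (names as in kernel/mutex.c of this era):
 *
 *      int fastcall mutex_lock_interruptible(struct mutex *lock)
 *      {
 *              might_sleep();
 *              return __mutex_fastpath_lock_retval(&lock->count,
 *                              __mutex_lock_interruptible_slowpath);
 *      }
 *
 * A 0 return means the fastpath took the lock; a -EINTR would come
 * from the slowpath when the sleep is interrupted by a signal.
 */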
/**
 *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)                         \
do {                                                                    \
        unsigned int dummy;                                             \
                                                                        \
        typecheck(atomic_t *, count);                                   \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   incl (%%eax)        \n"                     \
                        "   jle "#fail_fn"      \n"                     \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
                : "memory", "ecx", "edx");                              \
} while (0)
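/*
 * For illustration, a sketch of the expected caller, roughly what
 * kernel/mutex.c does (caller names may vary by version):
 *
 *      void fastcall mutex_unlock(struct mutex *lock)
 *      {
 *              __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *      }
 */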
#define __mutex_slowpath_needs_to_unlock()      1
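/*
 * This is 1 here because the incl-based fastpath above may take its
 * failure branch with the count already bumped up to 0 (i.e. still
 * lower than 1), so, per the contract documented at
 * __mutex_fastpath_unlock(), the slowpath has to perform the actual
 * unlock itself.
 */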
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        /*
         * We have two variants here. The cmpxchg based one is the best one
         * because it never induces a false contention state.  It is included
         * here because architectures using the inc/dec algorithms over the
         * xchg ones are much more likely to support cmpxchg natively.
         *
         * If not, we fall back to the spinlock based variant - that is
         * just as efficient (and simpler) as a 'destructive' probing of
         * the mutex state would be.
         */
#ifdef __HAVE_ARCH_CMPXCHG
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        return 0;
#else
        return fail_fn(count);
#endif
}
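/*
 * For illustration, a sketch of how this surfaces through the generic
 * API: mutex_trylock() passes __mutex_trylock_slowpath as fail_fn
 * (names as in kernel/mutex.c of this era; my_mutex is a hypothetical
 * struct mutex):
 *
 *      if (mutex_trylock(&my_mutex)) {
 *              ... critical section ...
 *              mutex_unlock(&my_mutex);
 *      }
 */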
#endif