 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

#include "runtime/os.hpp"

// Implementation of class atomic

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
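// Plain stores suffice here: on x86, aligned stores up to the native word
// size are single-copy atomic. They carry no ordering guarantees beyond
// program order; fenced stores live elsewhere (see OrderAccess).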


inline jint Atomic::add(jint add_value, volatile jint* dest) {
  jint addend = add_value;
  __asm__ volatile ("lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}
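// xaddl leaves the *old* value of *dest in the output register, so adding
// add_value once more produces the updated value callers expect.
// Hypothetical usage sketch:
//   volatile jint counter = 0;
//   jint now = Atomic::add(1, &counter);   // now == 1, counter == 1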

inline void Atomic::inc(volatile jint* dest) {
  __asm__ volatile ("lock addl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc_ptr((volatile intptr_t*)dest);
}

inline void Atomic::dec(volatile jint* dest) {
  __asm__ volatile ("lock subl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec_ptr((volatile intptr_t*)dest);
}
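// inc/dec use lock add/sub rather than lock xadd: they return nothing, so
// the cheaper form that does not read back the result suffices.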

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
  __asm__ volatile ("xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}
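// No lock prefix is needed: xchg with a memory operand is implicitly locked
// on x86 and acts as a full memory barrier.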

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

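// Advertise a native byte-wide cmpxchg so shared code does not have to
// emulate it with a word-sized compare-and-swap loop.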
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
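// cmpxchg protocol: the compare value is pinned to eax ("a"), and eax
// receives the value that was actually in memory; the CAS succeeded iff the
// return value equals compare_value. Hypothetical usage sketch:
//   volatile jint flag = 0;
//   if (Atomic::cmpxchg(1, &flag, 0) == 0) {
//     // this thread won the race
//   }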

#ifdef AMD64
inline void Atomic::store(jlong store_value, jlong* dest)          { *dest = store_value; }
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t addend = add_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ("lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ("lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}

inline jlong Atomic::load(const volatile jlong* src) { return *src; }
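// A plain aligned 64-bit load is atomic on AMD64, so no special handling is
// needed (contrast with the 32-bit path below).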

#else // !AMD64

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

extern "C" {
  // defined in bsd_x86.s
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}
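// _Atomic_cmpxchg_long is a hand-written cmpxchg8b sequence (the only 64-bit
// CAS available on IA-32); its trailing bool (fed from os::is_MP() below)
// controls whether the lock prefix is emitted, since the prefix is only
// required on multiprocessors. _Atomic_move_long performs one indivisible
// 8-byte move, which a 32-bit compiler will not generate for a plain jlong
// assignment.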

inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}

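// On IA-32 a jlong would otherwise be accessed as two 32-bit halves, which
// can tear; route 64-bit loads and stores through _Atomic_move_long instead.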
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  _Atomic_move_long(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  _Atomic_move_long((volatile jlong*)&store_value, dest);
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

// ============================================================================
// Reworked, template-based version of the same header follows.
// ============================================================================

 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

#include "runtime/os.hpp"

// Implementation of class atomic
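//
// This version of the header supplies template specializations: the shared
// Atomic front end is expected to dispatch Atomic::add, inc, dec, xchg,
// cmpxchg, load and store to these per-width specialized_* hooks, replacing
// the per-type overloads used in the version above.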

template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  int32_t addend = add_value;
  __asm__ volatile ("lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}

template <>
inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
  __asm__ volatile ("lock addl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

template <>
inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
  __asm__ volatile ("lock subl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  __asm__ volatile ("xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64

template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  int64_t addend = add_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}

template <>
inline void Atomic::specialized_inc<int64_t>(volatile int64_t* dest) {
  __asm__ __volatile__ ("lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline void Atomic::specialized_dec<int64_t>(volatile int64_t* dest) {
  __asm__ __volatile__ ("lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}
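// Note: unlike the earlier version of this file, _Atomic_cmpxchg_long no
// longer takes the is_MP flag; the lock-prefix decision is no longer made at
// this level.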

template <>
inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
  _Atomic_move_long(&store_value, dest);
}

template <>
inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
  volatile int64_t dest;
  _Atomic_move_long(src, &dest);
  return dest;
}

template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP