}

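// Note: x86 "xchg" with a memory operand carries an implicit lock prefix,
// so no explicit "lock" is needed; the exchange is atomic and acts as a
// full two-way memory barrier. The "memory" clobber additionally keeps the
// compiler from reordering memory accesses across the exchange.
//
// Illustrative use (not part of this header):
//   volatile jint flag = 0;
//   jint prev = Atomic::xchg(1, &flag);  // atomically sets flag; prev == 0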
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  __asm__ volatile ("xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

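// The PlatformCmpxchg specializations below all follow the same pattern:
// the "a" input constraint preloads compare_value into eax/rax,
// "lock cmpxchg" atomically stores exchange_value into *dest iff
// *dest == compare_value, and the "=a" output returns the value actually
// found in *dest (equal to compare_value exactly when the exchange
// succeeded). The byte variant needs the "q" constraint so exchange_value
// lands in a byte-addressable register.
//
// Illustrative use (not part of this header):
//   volatile jint v = 5;
//   jint witness = Atomic::cmpxchg(6, &v, 5);  // succeeds: witness == 5, v == 6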
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64
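// On AMD64, aligned 8-byte loads and stores are naturally atomic, so jlong
// load/store compile to plain moves; only read-modify-write operations need
// lock-prefixed instructions.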
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

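// "lock xaddq" atomically adds and leaves the *old* value of *dest in the
// addend register, so the new value is reconstructed as addend + add_value.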
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t addend = add_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ("lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ("lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

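// On 32-bit x86, intptr_t is 32 bits wide, so the pointer-sized operations
// simply delegate to the jint primitives above; jlong operations go through
// the assembly stubs declared further below.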
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

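// jlong is 64 bits even on 32-bit x86, where no single plain mov can read
// or write it atomically; the jlong operations below therefore call out to
// assembly stubs (presumably built on cmpxchg8b and 64-bit FPU moves; see
// bsd_x86.s).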
extern "C" {
  // defined in bsd_x86.s
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}

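// cmpxchg_using_helper adapts the jlong stub to the templated T by casting
// the arguments and result. The order argument is not forwarded: the stub
// is assumed to provide conservative, full-fence semantics.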
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

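// Loading a jlong with two 32-bit moves would not be atomic, so the load is
// routed through _Atomic_move_long into a local copy, which is then
// returned by value.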
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  _Atomic_move_long(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  _Atomic_move_long((volatile jlong*)&store_value, dest);
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP