23 */
24
25 #ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
26 #define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
27
28 // Implementation of class atomic
29
// Plain stores.  x86 guarantees that naturally aligned loads/stores of
// these widths are performed atomically, so a simple assignment suffices;
// no ordering (fence) semantics are implied here.
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }

// volatile-destination overloads: the volatile qualifier prevents the
// compiler from eliding or caching the store at the source level.
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
41
42
// Atomically adds add_value to *dest and returns the UPDATED value.
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  jint addend = add_value;
  // lock xadd atomically exchanges the register with (%2) and adds them,
  // leaving the PREVIOUS value of *dest in 'addend'.  The "0" constraint
  // ties the input to the same register as output operand %0.
  __asm__ volatile ( "lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  // previous value + add_value == the value just stored to *dest
  return addend + add_value;
}
51
// Atomically increments *dest by one; no value is returned.
inline void Atomic::inc (volatile jint* dest) {
  __asm__ volatile ( "lock addl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
56
57 inline void Atomic::inc_ptr(volatile void* dest) {
58 inc_ptr((volatile intptr_t*)dest);
59 }
60
// Atomically decrements *dest by one; no value is returned.
inline void Atomic::dec (volatile jint* dest) {
  __asm__ volatile ( "lock subl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
65
66 inline void Atomic::dec_ptr(volatile void* dest) {
67 dec_ptr((volatile intptr_t*)dest);
68 }
69
70 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
71 __asm__ volatile ( "xchgl (%2),%0"
94 }
95
// 4-byte compare-and-exchange.  Returns the value *dest held BEFORE the
// operation; callers compare it with compare_value to learn whether the
// exchange took place.  The order argument is ignored: a locked cmpxchg
// is a full fence on x86.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  // cmpxchg compares EAX (compare_value via the "a" constraint) with
  // *dest; on match it stores exchange_value, otherwise it loads *dest
  // into EAX.  Either way EAX (output "=a") ends up with the old value.
  __asm__ volatile ( "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
109
110 #ifdef AMD64
// On AMD64 an aligned 64-bit store is atomic, so plain assignment is enough.
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
113
// Atomically adds add_value to the 64-bit value at *dest and returns
// the UPDATED value (see the 32-bit Atomic::add above for the pattern).
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t addend = add_value;
  // xaddq leaves the previous *dest in 'addend' ("0" ties input/output).
  __asm__ __volatile__ ( "lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}
122
123 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
124 return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
125 }
126
// Atomically increments the pointer-sized (64-bit) value at *dest by one.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ( "lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

// Atomically decrements the pointer-sized (64-bit) value at *dest by one.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ( "lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
140
141 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
142 __asm__ __volatile__ ("xchgq (%2),%0"
143 : "=r" (exchange_value)
144 : "0" (exchange_value), "r" (dest)
146 return exchange_value;
147 }
148
// 8-byte compare-and-exchange (AMD64).  Same contract as the 4-byte
// version: returns the old *dest value; the order argument is ignored
// because a locked cmpxchg is already a full fence on x86.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  // RAX carries compare_value in and the old *dest value out ("a"/"=a").
  __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}
162
// On AMD64 an aligned 64-bit load is atomic, so a plain read suffices.
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
164
165 #else // !AMD64
166
167 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
168 return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
169 }
170
171 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
172 return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
173 }
174
175
176 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
177 inc((volatile jint*)dest);
178 }
179
180 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
181 dec((volatile jint*)dest);
182 }
183
184 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
185 return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
186 }
187
extern "C" {
  // defined in bsd_x86.s -- hand-written assembly helpers for 64-bit
  // atomics that 32-bit x86 cannot express from C (presumably via
  // cmpxchg8b; confirm in bsd_x86.s).  The trailing bool on
  // _Atomic_cmpxchg_long looks like a multiprocessor flag controlling
  // the lock prefix -- TODO confirm against callers.
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}
193
194 template<>
|
23 */
24
25 #ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
26 #define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
27
28 // Implementation of class atomic
29
// Plain stores.  x86 guarantees that naturally aligned loads/stores of
// these widths are performed atomically, so a simple assignment suffices;
// no ordering (fence) semantics are implied here.
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }

// volatile-destination overloads: the volatile qualifier prevents the
// compiler from eliding or caching the store at the source level.
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
41
42
// Size-dispatched platform add.  NOTE(review): FetchAndAdd appears to be
// a CRTP helper that builds the full add-and-fetch API on top of the
// fetch_and_add primitive declared below -- confirm in shared atomic.hpp.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  // Atomically adds add_value to *dest; returns the PREVIOUS *dest value.
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest) const;
};
50
// 4-byte fetch-and-add: atomically adds add_value to *dest and returns
// the value *dest held BEFORE the addition.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  // xadd leaves the previous *dest in the register operand; the "0"
  // constraint feeds add_value in through the same register that the
  // "=r" output (old_value) comes back out of.
  __asm__ volatile ( "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}
63
// 2-byte add: no direct 16-bit implementation here; AddShortUsingInt
// presumably synthesizes it from the 4-byte primitive -- see atomic.hpp.
template<>
struct Atomic::PlatformAdd<2>: Atomic::AddShortUsingInt {};
66
// Atomically increments *dest by one; no value is returned.
inline void Atomic::inc (volatile jint* dest) {
  __asm__ volatile ( "lock addl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
71
72 inline void Atomic::inc_ptr(volatile void* dest) {
73 inc_ptr((volatile intptr_t*)dest);
74 }
75
// Atomically decrements *dest by one; no value is returned.
inline void Atomic::dec (volatile jint* dest) {
  __asm__ volatile ( "lock subl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}
80
81 inline void Atomic::dec_ptr(volatile void* dest) {
82 dec_ptr((volatile intptr_t*)dest);
83 }
84
85 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
86 __asm__ volatile ( "xchgl (%2),%0"
109 }
110
// 4-byte compare-and-exchange.  Returns the value *dest held BEFORE the
// operation; callers compare it with compare_value to learn whether the
// exchange took place.  The order argument is ignored: a locked cmpxchg
// is a full fence on x86.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  // cmpxchg compares EAX (compare_value via the "a" constraint) with
  // *dest; on match it stores exchange_value, otherwise it loads *dest
  // into EAX.  Either way EAX (output "=a") ends up with the old value.
  __asm__ volatile ( "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
124
125 #ifdef AMD64
// On AMD64 an aligned 64-bit store is atomic, so plain assignment is enough.
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
128
// 8-byte fetch-and-add (AMD64): atomically adds add_value to *dest and
// returns the value *dest held BEFORE the addition.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  // xaddq leaves the previous *dest in the register operand ("0" ties
  // add_value's input register to the old_value output register).
  __asm__ __volatile__ ( "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}
141
// Atomically increments the pointer-sized (64-bit) value at *dest by one.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ( "lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

// Atomically decrements the pointer-sized (64-bit) value at *dest by one.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  __asm__ __volatile__ ( "lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}
155
156 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
157 __asm__ __volatile__ ("xchgq (%2),%0"
158 : "=r" (exchange_value)
159 : "0" (exchange_value), "r" (dest)
161 return exchange_value;
162 }
163
// 8-byte compare-and-exchange (AMD64).  Same contract as the 4-byte
// version: returns the old *dest value; the order argument is ignored
// because a locked cmpxchg is already a full fence on x86.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  // RAX carries compare_value in and the old *dest value out ("a"/"=a").
  __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}
177
// On AMD64 an aligned 64-bit load is atomic, so a plain read suffices.
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
179
180 #else // !AMD64
181
182 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
183 inc((volatile jint*)dest);
184 }
185
186 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
187 dec((volatile jint*)dest);
188 }
189
190 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
191 return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
192 }
193
extern "C" {
  // defined in bsd_x86.s -- hand-written assembly helpers for 64-bit
  // atomics that 32-bit x86 cannot express from C (presumably via
  // cmpxchg8b; confirm in bsd_x86.s).  The trailing bool on
  // _Atomic_cmpxchg_long looks like a multiprocessor flag controlling
  // the lock prefix -- TODO confirm against callers.
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}
199
200 template<>
|