 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

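// The shared Atomic API dispatches add operations to these PlatformAdd
// specializations.  Deriving from FetchAndAdd tells the shared layer that
// the platform primitive returns the value *before* the addition, so
// add-and-fetch can be synthesized from fetch_and_add.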
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest) const;
};

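// 4-byte fetch-and-add.  "lock xaddl" atomically adds %0 into (%2) and
// leaves the previous memory value in %0; the "0" constraint places
// add_value in the register chosen for old_value.  The "memory" clobber
// doubles as a compiler barrier, while the lock prefix supplies the
// hardware ordering.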
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile ("lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}
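
// 4-byte exchange and 1-byte compare-and-swap.  An xchg with a memory
// operand is implicitly locked on x86, so no lock prefix is needed; for
// cmpxchgb the exchange value must sit in a byte-addressable register,
// hence the "q" constraint.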
72 : "q" (exchange_value), "a" (compare_value), "r" (dest)
73 : "cc", "memory");
74 return exchange_value;
75 }

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
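
// A typical caller retries a failed CAS with the freshly observed value.
// Illustrative sketch only -- the names below are hypothetical, not part
// of this file:
//
//   jint observed = Atomic::load(&counter);
//   for (;;) {
//     jint prior = Atomic::cmpxchg(observed + 2, &counter, observed);
//     if (prior == observed) break;  // CAS succeeded
//     observed = prior;              // lost the race; retry
//   }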

#ifdef AMD64
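// On AMD64 the 8-byte operations map directly onto the quadword forms of
// the same instructions.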
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

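// On 32-bit x86 a plain 8-byte access may be split into two 4-byte accesses,
// so 64-bit atomics are delegated to assembly helpers (built on
// lock cmpxchg8b and a single 64-bit FPU or SSE move; see bsd_x86.s).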
extern "C" {
  // defined in bsd_x86.s
  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}

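// cmpxchg_using_helper adapts the raw jlong helper to the templated
// signature, converting T to and from jlong by bit pattern.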
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
}

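// 8-byte load and store both funnel through _Atomic_move_long so each is a
// single-copy atomic access; PrimitiveConversions::cast reinterprets the
// jlong bits as T (and back) without conversion.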
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile jlong dest;
  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP