/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!) Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};
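
// PlatformAdd uses CRTP: the shared AddAndFetch<PlatformAdd<byte_size> > base
// adapts the single add_and_fetch() member defined here to the Atomic::add()
// contract (return the new value), and Atomic::add() picks the specialization
// by operand size. A sketch of a call site (illustration only, not code from
// this file):
//   volatile int32_t counter = 0;
//   Atomic::add(1, &counter);   // dispatches to Atomic::PlatformAdd<4>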

#ifdef AMD64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
}

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
}
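
// A sketch of what the calls above do (assumed behavior of the shared
// add_using_helper<> template): the operands are reinterpreted as the stub's
// integer type (int32_t or int64_t), the os:: stub is invoked, and the
// returned new value is converted back to D. The os:: function pointers are
// expected to be bound at VM startup to lock-prefixed x86 stub code.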

#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest, \
                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }

DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)

#undef DEFINE_STUB_XCHG
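
// For reference, DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func) expands to:
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
//                                                T volatile* dest,
//                                                atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
//   }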

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG
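
// The order argument is accepted but ignored throughout this file: on x86,
// lock-prefixed read-modify-write instructions (and xchg with a memory
// operand, which is implicitly locked) act as full two-way fences, so every
// atomic_memory_order is satisfied by the strongest ordering anyway.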

#else // !AMD64

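// Note for the __asm implementations below: each function leaves its result
// in eax (edx:eax for the 8-byte cmpxchg) and deliberately has no C++ return
// statement, which is why warning C4035 is disabled at the top of this file.
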
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    lock xadd dword ptr [edx], eax;   // eax <- old value of *dest
    add eax, ecx;                     // old value + add_value = new value
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];        // implicitly locked; eax <- old value
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl   // al <- old value of *dest
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx // eax <- old value of *dest
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  // Split the 64-bit operands into the halves cmpxchg8b expects:
  // compare value in edx:eax, exchange value in ecx:ebx.
  int32_t ex_lo  = (int32_t)exchange_value;
  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
  int32_t cmp_lo = (int32_t)compare_value;
  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
  __asm {
    push ebx                          // ebx and edi are callee-saved
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]    // edx:eax <- old value of *dest
    pop edi
    pop ebx
  }
}