/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement
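// Warning C4035 fires for functions that end without an explicit return
// statement. Several functions below deliberately end with an __asm block
// that leaves the result in EAX (or EDX:EAX), which is where the 32-bit
// MSVC calling convention expects return values, so the warning is
// suppressed for the remainder of this file.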

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
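// PlatformAdd supplies the platform-specific add_and_fetch primitive; the
// shared AddAndFetch CRTP base (declared in the shared runtime/atomic.hpp)
// wraps it into the generic Atomic::add front end. A rough sketch of the
// shape of that base, for orientation only:
//
//   template<typename Derived>
//   struct AddAndFetch {
//     template<typename I, typename D>
//     D operator()(I add_value, D volatile* dest) const {
//       return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
//     }
//   };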

#ifdef AMD64
inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }

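// 64-bit MSVC does not support inline assembly, so on AMD64 the
// read-modify-write operations below dispatch through the os::atomic_*_func
// function pointers, which are set up at startup to refer to assembly stubs
// (see the Windows x86 port sources).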
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
}

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}

inline void Atomic::inc    (volatile jint*     dest) {
  (void)add    (1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void*     dest) {
  (void)add_ptr(1, dest);
}

inline void Atomic::dec    (volatile jint*     dest) {
  (void)add    (-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  (void)add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void*     dest) {
  (void)add_ptr(-1, dest);
}

inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
  return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
}

inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
  return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value, \
                                                         cmpxchg_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG
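// For reference, DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func)
// expands to (modulo whitespace):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   cmpxchg_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return cmpxchg_using_helper<jint>(os::atomic_cmpxchg_func, exchange_value,
//                                       dest, compare_value);
//   }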

inline jlong Atomic::load(const volatile jlong* src) { return *src; }
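// On AMD64, an aligned 64-bit load is a single atomic access, so a plain
// dereference suffices; contrast with the FILD/FISTP sequence needed on
// 32-bit x86 below.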

#else // !AMD64

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                   // save add_value for after the xadd
    lock xadd dword ptr [edx], eax; // eax <- old *dest, *dest <- old + add_value
    add eax, ecx;                   // new value (old + add_value) left in eax
  }
}
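// A sketch of the effect in C-like pseudocode (performed atomically):
//
//   jint old = *dest;           // lock xadd returns the previous value...
//   *dest = old + add_value;    // ...and stores the sum in one atomic step
//   return old + add_value;     // computed into EAX; hence no return statement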

inline void Atomic::inc    (volatile jint*     dest) {
  // alternative for InterlockedIncrement
  __asm {
    mov edx, dest;
    lock add dword ptr [edx], 1;
  }
}
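// inc/dec do not report the resulting value, so a plain 'lock add'/'lock sub'
// of an immediate suffices; the xadd used by add_and_fetch above is only
// needed when the previous value has to be retrieved.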

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile void*     dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec    (volatile jint*     dest) {
  // alternative for InterlockedDecrement
  __asm {
    mov edx, dest;
    lock sub dword ptr [edx], 1;
  }
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile void*     dest) {
  dec((volatile jint*)dest);
}

inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}
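// No lock prefix is needed here: xchg with a memory operand asserts the
// bus lock implicitly. The previous value of *dest ends up in EAX, which
// is also where the calling convention expects the return value.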

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}
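// cmpxchg compares AL with the byte at [edx]: on a match it stores CL there;
// otherwise it loads the current memory value into AL. Either way AL ends up
// holding the observed old value to be returned. The dword and qword
// variants below follow the same pattern.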

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  // Split the 64-bit operands into 32-bit halves: cmpxchg8b expects the
  // compare value in EDX:EAX and the exchange value in ECX:EBX, and leaves
  // the observed old value in EDX:EAX.
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx                        // save callee-saved registers used below
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]  // old value left in edx:eax
    pop edi
    pop ebx
  }
}

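// 32-bit x86 has no plain 64-bit integer move, and a pair of 32-bit loads
// or stores would not be atomic. FILD/FISTP transfer their 64-bit memory
// operand in a single access (via an x87 register), which is atomic for
// naturally aligned addresses, yielding atomic jlong load and store without
// lock-prefixed instructions.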
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, pdest
    fistp    qword ptr [eax]
  }
  return dest;
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  volatile jlong* src = &store_value;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, dest
    fistp    qword ptr [eax]
  }
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  Atomic::store(store_value, (volatile jlong*)dest);
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP