/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP

#include "runtime/os.hpp"

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
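
// Note: on x86, an aligned load or store of at most the native word size is
// atomic on its own, so the overloads above compile to plain moves with no
// lock prefix. Illustrative use (hypothetical field name):
//
//   volatile jint _ready;
//   Atomic::store(1, &_ready);   // a single mov; atomic, but makes no
//                                // memory-ordering promise by itself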

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
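
// Dispatch sketch: the generic Atomic::add in runtime/atomic.hpp selects
// PlatformAdd<sizeof(D)>, and the AddAndFetch mixin maps its operator()
// onto the add_and_fetch member defined below. Illustrative use
// (hypothetical counter):
//
//   volatile jint _users;
//   jint now = Atomic::add(1, &_users);   // returns the updated value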

#ifdef AMD64
inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
}

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}
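
// The 64-bit Microsoft compiler does not support inline assembly, so the
// AMD64 specializations in this file forward, through the *_using_helper
// wrappers, to assembly stubs (function pointers such as
// os::atomic_add_func) that the runtime installs at startup.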

#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }

DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)

#undef DEFINE_STUB_XCHG
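
// For reference, DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) expands to
// a PlatformXchg<4>::operator() that forwards to the stub through
// xchg_using_helper<jint>. Illustrative use (hypothetical flag):
//
//   volatile jint _claimed;
//   if (Atomic::xchg(1, &_claimed) == 0) {
//     // this thread observed 0 and wrote 1, so it won the claim
//   }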

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value, \
                                                         cmpxchg_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG
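
// A typical compare-and-swap retry loop built on the cmpxchg above
// (illustrative sketch, hypothetical counter):
//
//   volatile jint _value;
//   jint old;
//   do {
//     old = _value;
//   } while (Atomic::cmpxchg(old + 1, &_value, old) != old);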

// An aligned 64-bit load is atomic on AMD64, so a plain read suffices.
inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  // The result is left in eax; the #pragma warning(disable: 4035) above
  // silences the missing-return-statement warning.
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                    // save the addend
    lock xadd dword ptr [edx], eax;  // eax = old value of *dest
    add eax, ecx;                    // eax = old value + addend = new value
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  // xchg with a memory operand is implicitly locked; the old value is
  // left in eax.
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(1 == sizeof(T));
  // alternative for InterlockedCompareExchange
  // cmpxchg compares al with *dest and, on a match, stores cl; the old
  // value ends up in al either way.
  __asm {
    mov edx, dest
    mov cl, exchange_value
    mov al, compare_value
    lock cmpxchg byte ptr [edx], cl
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedCompareExchange
  // Same pattern as the byte variant, on a 32-bit operand; the old value
  // ends up in eax either way.
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    lock cmpxchg dword ptr [edx], ecx
  }
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  // Split the 64-bit operands into 32-bit halves for cmpxchg8b, which
  // compares edx:eax with *dest and, on a match, stores ecx:ebx.
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx                        // ebx and edi are callee-saved
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    lock cmpxchg8b qword ptr [edi]  // old value is returned in edx:eax
    pop edi
    pop ebx
  }
}
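
// Illustrative 64-bit update loop on IA-32 (hypothetical accumulator),
// combining the x87-based load below with the cmpxchg8b above:
//
//   volatile jlong _total;
//   jlong old;
//   do {
//     old = Atomic::load(&_total);
//   } while (Atomic::cmpxchg(old + 1, &_total, old) != old);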

inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  volatile jlong* pdest = &dest;
  // A 64-bit access on IA-32 would otherwise take two 32-bit moves and
  // could tear; fild/fistp move all 64 bits through the x87 stack in
  // single memory accesses, and the round trip is exact for integers.
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, pdest
    fistp    qword ptr [eax]
  }
  return dest;
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  volatile jlong* src = &store_value;
  // Same x87 technique as Atomic::load above: one 64-bit store, so readers
  // never observe a half-written value.
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, dest
    fistp    qword ptr [eax]
  }
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  Atomic::store(store_value, (volatile jlong*)dest);
}

#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

#endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP