/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP

#include <intrin.h>
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
  _ReadWriteBarrier();
}
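
// Illustrative sketch of what the barrier prevents (the names below are
// made up, not part of this file): without it, the compiler could sink
// the non-volatile data store past the flag store.
//
//   _data = 42;          // ordinary store
//   compiler_barrier();  // compiler may not move memory accesses across
//   _ready = true;       // flag store
//
// _ReadWriteBarrier() constrains only the compiler and emits no fence
// instruction; x86's strong hardware ordering covers the rest (see
// storeload() below for the one exception).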

// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire and release semantics (w.r.t. compiler
// reordering) and therefore do not even need a compiler barrier
// for normal acquire/release accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
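
// For context: the shared code (runtime/orderAccess.inline.hpp) wraps each
// ordered access in a ScopedFence whose constructor runs prefix() and
// whose destructor runs postfix(), roughly:
//
//   template <typename FieldType, ScopedFenceType FenceType>
//   inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
//     ScopedFence<FenceType> f((void*)p);   // prefix()
//     return load(p);                       // postfix() on scope exit
//   }
//
// With the empty specializations above, load_acquire and release_store
// reduce to plain volatile accesses on this platform; only
// release_store_fence pays for a real fence, via the RELEASE_X_FENCE
// postfix().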

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }
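
// x86 is TSO: the hardware never reorders load-load, store-store, or
// load-store pairs, so the barriers above only need to stop compiler
// reordering. Store-load is the one reordering x86 permits (a store
// passing a later load), which is why storeload() is the only barrier
// that costs a real fence instruction.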

inline void OrderAccess::fence() {
#ifdef AMD64
  StubRoutines_fence();
#else
  if (os::is_MP()) {
    __asm {
      lock add dword ptr [esp], 0;
    }
  }
#endif // AMD64
  compiler_barrier();
}
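
// Notes on fence(): a lock-prefixed read-modify-write of the top of the
// stack acts as a full StoreLoad barrier while leaving [esp] unchanged,
// and has historically been cheaper than mfence on common x86 parts.
// 64-bit MSVC has no inline assembler, so the AMD64 path calls through a
// generated stub instead. An intrinsic-based alternative (a sketch, not
// what this file does) would be:
//
//   _mm_mfence();   // from <intrin.h>; a full hardware memory fence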

#ifndef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm {
    mov edx, p;
    mov al, v;
    xchg al, byte ptr [edx];
  }
}

template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm {
    mov edx, p;
    mov ax, v;
    xchg ax, word ptr [edx];
  }
}

template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm {
    mov edx, p;
    mov eax, v;
    xchg eax, dword ptr [edx];
  }
}
#endif // AMD64
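
// An xchg with a memory operand asserts an implicit lock prefix, so each
// specialization above performs the release store and the trailing full
// fence in a single instruction; for the jint case it is morally
// equivalent to:
//
//   mov  dword ptr [edx], eax    ; the release store
//   lock add dword ptr [esp], 0  ; the trailing StoreLoad fence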

template<>
inline void OrderAccess::specialized_release_store_fence<jfloat>(volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}
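
// The floating-point versions reuse the same-width integer paths by
// reinterpreting the bits (jint_cast/jlong_cast), so no separate
// floating-point ordering code is needed.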

// Signals to shared code that this platform supplies the generalized
// (template-based) OrderAccess implementation.
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP