
src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp

rev 47383 : [mq]: OrderAccess_refactoring
   1 /*
   2  * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  47 inline void OrderAccess::storestore() { compiler_barrier(); }
  48 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  49 inline void OrderAccess::storeload()  { fence();            }
  50 
  51 inline void OrderAccess::acquire()    { compiler_barrier(); }
  52 inline void OrderAccess::release()    { compiler_barrier(); }
  53 
  54 inline void OrderAccess::fence() {
  55   if (os::is_MP()) {
  56     // always use locked addl since mfence is sometimes expensive
  57 #ifdef AMD64
  58     __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
  59 #else
  60     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
  61 #endif
  62   }
  63   compiler_barrier();
  64 }
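The StoreLoad barrier provided by fence() matters in Dekker-style patterns, where a thread's store must become globally visible before that thread's next load. A minimal caller-side sketch (hypothetical flag names, not from this file):

// Sketch only: publish my intent, then read the other thread's intent.
// The full fence keeps the store from being reordered after the load.
static volatile jint _self_intent  = 0;
static volatile jint _other_intent = 0;

inline bool try_enter_sketch() {
  _self_intent = 1;           // store
  OrderAccess::fence();       // StoreLoad barrier (the locked addl above)
  return _other_intent == 0;  // load, guaranteed to be ordered after the store
}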
  65 
  66 template<>
  67 inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  68   __asm__ volatile (  "xchgb (%2),%0"
  69                     : "=q" (v)
  70                     : "0" (v), "r" (p)
  71                     : "memory");
  72 }
  73 template<>
  74 inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  75   __asm__ volatile (  "xchgw (%2),%0"
  76                     : "=r" (v)
  77                     : "0" (v), "r" (p)
  78                     : "memory");
  79 }
  80 template<>
  81 inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  82   __asm__ volatile (  "xchgl (%2),%0"
  83                     : "=r" (v)
  84                     : "0" (v), "r" (p)
  85                     : "memory");
  86 }
  87 
  88 #ifdef AMD64
  89 template<>
  90 inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
  91   __asm__ volatile (  "xchgq (%2), %0"
  92                     : "=r" (v)
  93                     : "0" (v), "r" (p)
  94                     : "memory");
  95 }
  96 #endif // AMD64
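The xchg instruction with a memory operand carries an implied lock prefix, so each specialization above both performs the store and acts as a full barrier in a single instruction. A semantically equivalent but typically slower sketch would be a plain store followed by fence() (illustrative helper name, not from this file):

// Sketch only: release_store_fence expressed as store + full fence.
inline void release_store_fence_alt_sketch(volatile jint* p, jint v) {
  *p = v;                // the release store
  OrderAccess::fence();  // locked addl, as defined above
}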
  97 
  98 template<>
  99 inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
 100   release_store_fence((volatile jint*)p, jint_cast(v));
 101 }
 102 template<>
 103 inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
 104   release_store_fence((volatile jlong*)p, jlong_cast(v));
 105 }
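A usage sketch of this API (hypothetical field name): callers go through the generic entry points, and the matching specialization above is selected by operand type.

// Sketch only: release-store-fence publication of a 32-bit field.
static volatile jint _published_state = 0;

inline void publish_state_sketch(jint s) {
  OrderAccess::release_store_fence(&_published_state, s);  // becomes xchgl on x86
}

inline jint read_state_sketch() {
  return OrderAccess::load_acquire(&_published_state);     // plain load + compiler barrier on x86
}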
 106 
 107 #define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 108 
 109 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
   1 /*
   2  * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  47 inline void OrderAccess::storestore() { compiler_barrier(); }
  48 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  49 inline void OrderAccess::storeload()  { fence();            }
  50 
  51 inline void OrderAccess::acquire()    { compiler_barrier(); }
  52 inline void OrderAccess::release()    { compiler_barrier(); }
  53 
  54 inline void OrderAccess::fence() {
  55   if (os::is_MP()) {
  56     // always use locked addl since mfence is sometimes expensive
  57 #ifdef AMD64
  58     __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
  59 #else
  60     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
  61 #endif
  62   }
  63   compiler_barrier();
  64 }
  65 
  66 template<>
  67 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
  68   VALUE_OBJ_CLASS_SPEC
  69 {
  70   template <typename T>
  71   void operator()(T v, volatile T* p) const {
  72     __asm__ volatile (  "xchgb (%2),%0"
  73                       : "=q" (v)
  74                       : "0" (v), "r" (p)
  75                       : "memory");
  76   }
  77 };
  78 
  79 template<>
  80 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
  81   VALUE_OBJ_CLASS_SPEC
  82 {
  83   template <typename T>
  84   void operator()(T v, volatile T* p) const {
  85     __asm__ volatile (  "xchgw (%2),%0"
  86                       : "=r" (v)
  87                       : "0" (v), "r" (p)
  88                       : "memory");
  89   }
  90 };
  91 
  92 template<>
  93 struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
  94   VALUE_OBJ_CLASS_SPEC
  95 {
  96   template <typename T>
  97   void operator()(T v, volatile T* p) const {
  98     __asm__ volatile (  "xchgl (%2),%0"
  99                       : "=r" (v)
 100                       : "0" (v), "r" (p)
 101                       : "memory");
 102   }
 103 };
 104 
 105 #ifdef AMD64
 106 template<>
 107 struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
 108   VALUE_OBJ_CLASS_SPEC
 109 {
 110   template <typename T>
 111   void operator()(T v, volatile T* p) const {
 112     __asm__ volatile (  "xchgq (%2), %0"
 113                       : "=r" (v)
 114                       : "0" (v), "r" (p)
 115                       : "memory");
 116   }
 117 };
 118 #endif // AMD64
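The refactoring replaces the per-type specializations with functors selected by operand size, so one specialization covers every type of that width. A self-contained sketch of the dispatch pattern (illustrative names, not the shared HotSpot code itself):

#include <cstddef>

// Sketch only: primary template, specialized per operand size.
template <size_t byte_size> struct OrderedStoreSketch;

template <> struct OrderedStoreSketch<4> {
  template <typename T>
  void operator()(T v, volatile T* p) const {
    // Same xchg idiom as PlatformOrderedStore<4, RELEASE_X_FENCE> above.
    __asm__ volatile ("xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

// Generic entry point: sizeof(T) picks the specialization, so jint and
// other 4-byte types resolve to the same functor without per-type code.
template <typename T>
inline void release_store_fence_sketch(volatile T* p, T v) {
  OrderedStoreSketch<sizeof(T)>()(v, p);
}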
 119 
 120 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP