src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File open Sdiff src/hotspot/os_cpu/linux_arm

src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp

Print this page




  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
  26 #define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
  27 
  28 // Included in orderAccess.hpp header file.
  29 
  30 #include "runtime/os.hpp"
  31 #include "vm_version_arm.hpp"
  32 
  33 // Implementation of class OrderAccess.
  34 // - we define the high level barriers below and use the general
  35 //   implementation in orderAccess.hpp, with customizations
  36 //   on AARCH64 via the specialized_* template functions
  37 
  38 // Memory Ordering on ARM is weak.
  39 //
  40 // Implement all 4 memory ordering barriers by DMB, since it is a
  41 // lighter version of DSB.
  42 // dmb_sy implies full system shareability domain. RD/WR access type.
  43 // dmb_st implies full system shareability domain. WR only access type.
  44 //
  45 // NOP on < ARMv6 (MP not supported)
  46 //
  47 // Non mcr instructions can be used if we build for armv7 or higher arch
  48 //    __asm__ __volatile__ ("dmb" : : : "memory");
  49 //    __asm__ __volatile__ ("dsb" : : : "memory");
  50 //
  51 // inline void _OrderAccess_dsb() {
  52 //    volatile intptr_t dummy = 0;
  53 //    if (os::is_MP()) {
  54 //      __asm__ volatile (
  55 //        "mcr p15, 0, %0, c7, c10, 4"
  56 //        : : "r" (dummy) : "memory");
  57 //    }
  58 // }
  59 
  // Full-system data memory barrier, read/write access type (see the
  // "dmb_sy implies full system shareability domain" note above).
  // NOP on uniprocessor systems.
  60 inline static void dmb_sy() {
  61    if (!os::is_MP()) {
  62      return;
  63    }
  64 #ifdef AARCH64
  65    __asm__ __volatile__ ("dmb sy" : : : "memory");
  66 #else
  67    if (VM_Version::arm_arch() >= 7) {
  68 #ifdef __thumb__
  69      __asm__ volatile (
  70      "dmb sy": : : "memory");
  71 #else
  // ARM (non-Thumb) mode: emit the DMB machine word directly
  // (0xF57FF050 | option, option 0xf = SY) so this assembles even when
  // the toolchain's default target predates ARMv7; equivalent to the
  // "dmb sy" mnemonic used in the Thumb branch above.
  72      __asm__ volatile (
  73      ".word 0xF57FF050 | 0xf" : : : "memory");
  74 #endif
  75    } else {
  // Pre-ARMv7 fallback: legacy CP15 barrier via mcr p15,0,Rt,c7,c10,5
  // (cf. the commented-out _OrderAccess_dsb() example in the header).
  76      intptr_t zero = 0;
  77      __asm__ volatile (
  78        "mcr p15, 0, %0, c7, c10, 5"
  79        : : "r" (zero) : "memory");
  80    }
  81 #endif
  82 }
  83 
  // Store-only data memory barrier, full-system shareability (see the
  // "dmb_st ... WR only access type" note above). NOP on uniprocessor.
  84 inline static void dmb_st() {
  85    if (!os::is_MP()) {
  86      return;
  87    }
  88 #ifdef AARCH64
  89    __asm__ __volatile__ ("dmb st" : : : "memory");
  90 #else
  91    if (VM_Version::arm_arch() >= 7) {
  92 #ifdef __thumb__
  93      __asm__ volatile (
  94      "dmb st": : : "memory");
  95 #else
  // ARM mode: DMB machine word with option 0xe (= ST), mirroring the
  // "dmb st" mnemonic in the Thumb branch above.
  96      __asm__ volatile (
  97      ".word 0xF57FF050 | 0xe" : : : "memory");
  98 #endif
  99    } else {
  // Pre-ARMv7: same CP15 barrier as dmb_sy() — conservatively a full
  // barrier; no store-only variant is used on the legacy path.
 100      intptr_t zero = 0;
 101      __asm__ volatile (
 102        "mcr p15, 0, %0, c7, c10, 5"
 103        : : "r" (zero) : "memory");
 104    }
 105 #endif
 106 }
 107 
 108 // Load-Load/Store barrier
 // Orders prior loads against subsequent loads and stores. AArch64 has a
 // dedicated "dmb ld"; 32-bit ARM has no load-only option on this path,
 // so it conservatively falls back to the full dmb_sy() barrier (which
 // also performs the os::is_MP() check).
 109 inline static void dmb_ld() {
 110 #ifdef AARCH64
 111    if (!os::is_MP()) {
 112      return;
 113    }
 114    __asm__ __volatile__ ("dmb ld" : : : "memory");
 115 #else
 116    dmb_sy();
 117 #endif
 118 }
 119 
 120 
 // Map the OrderAccess primitives onto the weakest sufficient DMB flavor:
 // load-side barriers (loadload/loadstore/acquire) use dmb_ld, the
 // store-store barrier uses dmb_st, and storeload/release/fence require
 // the full read/write barrier dmb_sy.
 121 inline void OrderAccess::loadload()   { dmb_ld(); }
 122 inline void OrderAccess::loadstore()  { dmb_ld(); }
 123 inline void OrderAccess::acquire()    { dmb_ld(); }
 124 inline void OrderAccess::storestore() { dmb_st(); }
 125 inline void OrderAccess::storeload()  { dmb_sy(); }
 126 inline void OrderAccess::release()    { dmb_sy(); }
 127 inline void OrderAccess::fence()      { dmb_sy(); }
 128 
 129 // specializations for Aarch64
 130 // TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
 131 
 132 #ifdef AARCH64
 133 
 // 1-byte load-acquire: LDARB performs the load with acquire ordering in
 // a single instruction, so no separate barrier is emitted (cf. the
 // TODO-AARCH64 note above comparing this to the 32-bit barrier approach).
 134 template<>
 135 struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
 136 {
 137   template <typename T>
 138   T operator()(const volatile T* p) const {
 139     volatile T result;
 140     __asm__ volatile(
 141       "ldarb %w[res], [%[ptr]]"
 142       : [res] "=&r" (result)
 143       : [ptr] "r" (p)
 144       : "memory");
 145     return result;
 146   }
 147 };
 148 
 // 2-byte load-acquire via LDARH (halfword variant; %w = 32-bit register
 // view). Acquire ordering comes from the instruction itself.
 149 template<>
 150 struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
 151 {
 152   template <typename T>
 153   T operator()(const volatile T* p) const {
 154     volatile T result;
 155     __asm__ volatile(
 156       "ldarh %w[res], [%[ptr]]"
 157       : [res] "=&r" (result)
 158       : [ptr] "r" (p)
 159       : "memory");
 160     return result;
 161   }
 162 };
 163 
 // 4-byte load-acquire via LDAR on a 32-bit (%w) register.
 164 template<>
 165 struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
 166 {
 167   template <typename T>
 168   T operator()(const volatile T* p) const {
 169     volatile T result;
 170     __asm__ volatile(
 171       "ldar %w[res], [%[ptr]]"
 172       : [res] "=&r" (result)
 173       : [ptr] "r" (p)
 174       : "memory");
 175     return result;
 176   }
 177 };
 178 
 // 8-byte load-acquire via LDAR on the full 64-bit register (no %w
 // modifier, unlike the 1/2/4-byte specializations above).
 179 template<>
 180 struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
 181 {
 182   template <typename T>
 183   T operator()(const volatile T* p) const {
 184     volatile T result;
 185     __asm__ volatile(
 186       "ldar %[res], [%[ptr]]"
 187       : [res] "=&r" (result)
 188       : [ptr] "r" (p)
 189       : "memory");
 190     return result;
 191   }
 192 };
 193 
 // 1-byte store-release: STLRB performs the store with release ordering
 // in a single instruction; no separate fence is emitted here — NOTE
 // (review): RELEASE_X_FENCE nominally includes a trailing fence; confirm
 // the generic layer's expectations are met by stlr alone.
 194 template<>
 195 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
 196 {
 197   template <typename T>
 198   void operator()(T v, volatile T* p) const {
 199     __asm__ volatile(
 200       "stlrb %w[val], [%[ptr]]"
 201       :
 202       : [ptr] "r" (p), [val] "r" (v)
 203       : "memory");
 204   }
 205 };
 206 
 // 2-byte store-release via STLRH (halfword variant).
 207 template<>
 208 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
 209 {
 210   template <typename T>
 211   void operator()(T v, volatile T* p) const {
 212     __asm__ volatile(
 213       "stlrh %w[val], [%[ptr]]"
 214       :
 215       : [ptr] "r" (p), [val] "r" (v)
 216       : "memory");
 217   }
 218 };
 219 
 // 4-byte store-release via STLR on a 32-bit (%w) register.
 220 template<>
 221 struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
 222 {
 223   template <typename T>
 224   void operator()(T v, volatile T* p) const {
 225     __asm__ volatile(
 226       "stlr %w[val], [%[ptr]]"
 227       :
 228       : [ptr] "r" (p), [val] "r" (v)
 229       : "memory");
 230   }
 231 };
 232 
 // 8-byte store-release via STLR on the full 64-bit register (no %w
 // modifier, unlike the 1/2/4-byte specializations above).
 233 template<>
 234 struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
 235 {
 236   template <typename T>
 237   void operator()(T v, volatile T* p) const {
 238     __asm__ volatile(
 239       "stlr %[val], [%[ptr]]"
 240       :
 241       : [ptr] "r" (p), [val] "r" (v)
 242       : "memory");
 243   }
 244 };
 245 
 246 #endif // AARCH64
 247 
 248 #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP


  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
  26 #define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
  27 
  28 // Included in orderAccess.hpp header file.
  29 
  30 #include "runtime/os.hpp"
  31 #include "vm_version_arm.hpp"
  32 
  33 // Implementation of class OrderAccess.
  34 // - we define the high level barriers below and use the general
  35 //   implementation in orderAccess.hpp.

  36 
  37 // Memory Ordering on ARM is weak.
  38 //
  39 // Implement all 4 memory ordering barriers by DMB, since it is a
  40 // lighter version of DSB.
  41 // dmb_sy implies full system shareability domain. RD/WR access type.
  42 // dmb_st implies full system shareability domain. WR only access type.
  43 //
  44 // NOP on < ARMv6 (MP not supported)
  45 //
  46 // Non mcr instructions can be used if we build for armv7 or higher arch
  47 //    __asm__ __volatile__ ("dmb" : : : "memory");
  48 //    __asm__ __volatile__ ("dsb" : : : "memory");
  49 //
  50 // inline void _OrderAccess_dsb() {
  51 //    volatile intptr_t dummy = 0;
  52 //    if (os::is_MP()) {
  53 //      __asm__ volatile (
  54 //        "mcr p15, 0, %0, c7, c10, 4"
  55 //        : : "r" (dummy) : "memory");
  56 //    }
  57 // }
  58 
  // Full-system data memory barrier, read/write access type (see the
  // "dmb_sy implies full system shareability domain" note above).
  // NOP on uniprocessor systems. (Blank lines below are webrev
  // deleted-line placeholders from the removed AARCH64 branch.)
  59 inline static void dmb_sy() {
  60    if (!os::is_MP()) {
  61      return;
  62    }



  63    if (VM_Version::arm_arch() >= 7) {
  64 #ifdef __thumb__
  65      __asm__ volatile (
  66      "dmb sy": : : "memory");
  67 #else
  // ARM (non-Thumb) mode: emit the DMB machine word directly
  // (0xF57FF050 | option, option 0xf = SY), equivalent to the "dmb sy"
  // mnemonic used in the Thumb branch above.
  68      __asm__ volatile (
  69      ".word 0xF57FF050 | 0xf" : : : "memory");
  70 #endif
  71    } else {
  // Pre-ARMv7 fallback: legacy CP15 barrier via mcr p15,0,Rt,c7,c10,5.
  72      intptr_t zero = 0;
  73      __asm__ volatile (
  74        "mcr p15, 0, %0, c7, c10, 5"
  75        : : "r" (zero) : "memory");
  76    }

  77 }
  78 
  // Store-only data memory barrier, full-system shareability (see the
  // "dmb_st ... WR only access type" note above). NOP on uniprocessor.
  79 inline static void dmb_st() {
  80    if (!os::is_MP()) {
  81      return;
  82    }



  83    if (VM_Version::arm_arch() >= 7) {
  84 #ifdef __thumb__
  85      __asm__ volatile (
  86      "dmb st": : : "memory");
  87 #else
  // ARM mode: DMB machine word with option 0xe (= ST), mirroring the
  // "dmb st" mnemonic in the Thumb branch above.
  88      __asm__ volatile (
  89      ".word 0xF57FF050 | 0xe" : : : "memory");
  90 #endif
  91    } else {
  // Pre-ARMv7: same CP15 barrier as dmb_sy() — conservatively a full
  // barrier; no store-only variant is used on the legacy path.
  92      intptr_t zero = 0;
  93      __asm__ volatile (
  94        "mcr p15, 0, %0, c7, c10, 5"
  95        : : "r" (zero) : "memory");
  96    }

  97 }
  98 
  99 // Load-Load/Store barrier
 // 32-bit ARM has no load-only DMB option on this path, so this
 // conservatively delegates to the full dmb_sy() barrier (which also
 // performs the os::is_MP() check).
 100 inline static void dmb_ld() {






 101    dmb_sy();

 102 }
 103 
 104 
 // Map the OrderAccess primitives onto the weakest sufficient DMB flavor:
 // load-side barriers (loadload/loadstore/acquire) use dmb_ld, the
 // store-store barrier uses dmb_st, and storeload/release/fence require
 // the full read/write barrier dmb_sy.
 105 inline void OrderAccess::loadload()   { dmb_ld(); }
 106 inline void OrderAccess::loadstore()  { dmb_ld(); }
 107 inline void OrderAccess::acquire()    { dmb_ld(); }
 108 inline void OrderAccess::storestore() { dmb_st(); }
 109 inline void OrderAccess::storeload()  { dmb_sy(); }
 110 inline void OrderAccess::release()    { dmb_sy(); }
 111 inline void OrderAccess::fence()      { dmb_sy(); }























































































































 112 
 113 #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File