src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
*** old/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Mon Sep 17 10:30:56 2018
--- new/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Mon Sep 17 10:30:56 2018

*** 30,41 ****
  #include "runtime/os.hpp"
  #include "vm_version_arm.hpp"
  
  // Implementation of class OrderAccess.
  // - we define the high level barriers below and use the general
! //   implementation in orderAccess.hpp, with customizations
! //   on AARCH64 via the specialized_* template functions
  
  // Memory Ordering on ARM is weak.
  //
  // Implement all 4 memory ordering barriers by DMB, since it is a
  // lighter version of DSB.
--- 30,40 ----
  #include "runtime/os.hpp"
  #include "vm_version_arm.hpp"
  
  // Implementation of class OrderAccess.
  // - we define the high level barriers below and use the general
! //   implementation in orderAccess.hpp.
  
  // Memory Ordering on ARM is weak.
  //
  // Implement all 4 memory ordering barriers by DMB, since it is a
  // lighter version of DSB.
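Note (editorial, not part of the webrev): the four barriers named in this comment constrain which of the surrounding loads and stores may be reordered across them. Below is a minimal sketch of the classic publication idiom using the OrderAccess names from this file; the publish/consume functions and the two variables are hypothetical:

  #include "runtime/orderAccess.hpp"

  static volatile int  payload = 0;
  static volatile bool flag    = false;

  // Writer: make the payload globally visible before the flag that
  // announces it. storestore() emits "dmb st" on ARMv7 (see dmb_st below).
  void publish(int v) {
    payload = v;
    OrderAccess::storestore();
    flag = true;
  }

  // Reader: read the flag before the payload. loadload() goes through
  // dmb_ld(), which on 32-bit ARM is a full "dmb sy" (see below).
  bool consume(int* out) {
    if (!flag) return false;
    OrderAccess::loadload();
    *out = payload;
    return true;
  }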
*** 59,71 ****
  
  inline static void dmb_sy() {
     if (!os::is_MP()) {
       return;
     }
- #ifdef AARCH64
-    __asm__ __volatile__ ("dmb sy" : : : "memory");
- #else
     if (VM_Version::arm_arch() >= 7) {
  #ifdef __thumb__
       __asm__ volatile (
       "dmb sy": : : "memory");
  #else
--- 58,67 ----
  
  inline static void dmb_sy() {
     if (!os::is_MP()) {
       return;
     }
     if (VM_Version::arm_arch() >= 7) {
  #ifdef __thumb__
       __asm__ volatile (
       "dmb sy": : : "memory");
  #else
*** 76,95 ****
       intptr_t zero = 0;
       __asm__ volatile (
         "mcr p15, 0, %0, c7, c10, 5"
         : : "r" (zero) : "memory");
     }
- #endif
  }
  
  inline static void dmb_st() {
     if (!os::is_MP()) {
       return;
     }
- #ifdef AARCH64
-    __asm__ __volatile__ ("dmb st" : : : "memory");
- #else
     if (VM_Version::arm_arch() >= 7) {
  #ifdef __thumb__
       __asm__ volatile (
       "dmb st": : : "memory");
  #else
--- 72,87 ----
       intptr_t zero = 0;
       __asm__ volatile (
         "mcr p15, 0, %0, c7, c10, 5"
         : : "r" (zero) : "memory");
     }
  }
  
  inline static void dmb_st() {
     if (!os::is_MP()) {
       return;
     }
     if (VM_Version::arm_arch() >= 7) {
  #ifdef __thumb__
       __asm__ volatile (
       "dmb st": : : "memory");
  #else
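Note (editorial): pre-ARMv7 cores have no DMB instruction, so the fallback path issues the ARMv6 CP15 barrier operation, a write to coprocessor 15 register c7, CRm c10, opcode 5, which architecturally performs a data memory barrier. A standalone sketch of the same dispatch, assuming a 32-bit Linux/ARM target with GCC-style inline assembly; have_armv7 stands in for VM_Version::arm_arch() >= 7:

  #include <cstdint>

  static bool have_armv7 = true;  // placeholder for a runtime CPU check

  inline void full_barrier() {
    if (have_armv7) {
      // ARMv7 and later: dedicated barrier instruction.
      __asm__ volatile ("dmb sy" : : : "memory");
    } else {
      // ARMv6: DMB expressed as a CP15 c7 barrier operation.
      std::intptr_t zero = 0;
      __asm__ volatile ("mcr p15, 0, %0, c7, c10, 5"
                        : : "r" (zero) : "memory");
    }
  }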
*** 100,122 ****
       intptr_t zero = 0;
       __asm__ volatile (
         "mcr p15, 0, %0, c7, c10, 5"
         : : "r" (zero) : "memory");
     }
- #endif
  }
  
  // Load-Load/Store barrier
  inline static void dmb_ld() {
- #ifdef AARCH64
-    if (!os::is_MP()) {
-      return;
-    }
-    __asm__ __volatile__ ("dmb ld" : : : "memory");
- #else
     dmb_sy();
- #endif
  }
  
  inline void OrderAccess::loadload()   { dmb_ld(); }
  inline void OrderAccess::loadstore()  { dmb_ld(); }
--- 92,106 ----
       intptr_t zero = 0;
       __asm__ volatile (
         "mcr p15, 0, %0, c7, c10, 5"
         : : "r" (zero) : "memory");
     }
  }
  
  // Load-Load/Store barrier
  inline static void dmb_ld() {
     dmb_sy();
  }
  
  inline void OrderAccess::loadload()   { dmb_ld(); }
  inline void OrderAccess::loadstore()  { dmb_ld(); }
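Note (editorial): ARMv7 has no load-only DMB variant (the LD option arrived with ARMv8), so on 32-bit ARM dmb_ld() degenerates to the full dmb_sy(). The same orderings can be expressed with standard fences; a sketch for comparison, not part of the patch:

  #include <atomic>

  // Acquire fence: keeps earlier loads ahead of later loads and stores,
  // the ordering loadload()/loadstore()/acquire() provide in this file.
  inline void acquire_fence() {
    std::atomic_thread_fence(std::memory_order_acquire);
  }

  // Full fence: additionally orders stores against later loads, the
  // ordering storeload()/fence() provide.
  inline void full_fence() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }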
*** 124,248 ****
  inline void OrderAccess::storestore() { dmb_st(); }
  inline void OrderAccess::storeload()  { dmb_sy(); }
  inline void OrderAccess::release()    { dmb_sy(); }
  inline void OrderAccess::fence()      { dmb_sy(); }
  
- // specializations for Aarch64
- // TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
- 
- #ifdef AARCH64
- 
- template<>
- struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
- {
-   template <typename T>
-   T operator()(const volatile T* p) const {
-     volatile T result;
-     __asm__ volatile(
-       "ldarb %w[res], [%[ptr]]"
-       : [res] "=&r" (result)
-       : [ptr] "r" (p)
-       : "memory");
-     return result;
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
- {
-   template <typename T>
-   T operator()(const volatile T* p) const {
-     volatile T result;
-     __asm__ volatile(
-       "ldarh %w[res], [%[ptr]]"
-       : [res] "=&r" (result)
-       : [ptr] "r" (p)
-       : "memory");
-     return result;
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
- {
-   template <typename T>
-   T operator()(const volatile T* p) const {
-     volatile T result;
-     __asm__ volatile(
-       "ldar %w[res], [%[ptr]]"
-       : [res] "=&r" (result)
-       : [ptr] "r" (p)
-       : "memory");
-     return result;
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
- {
-   template <typename T>
-   T operator()(const volatile T* p) const {
-     volatile T result;
-     __asm__ volatile(
-       "ldar %[res], [%[ptr]]"
-       : [res] "=&r" (result)
-       : [ptr] "r" (p)
-       : "memory");
-     return result;
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
- {
-   template <typename T>
-   void operator()(T v, volatile T* p) const {
-     __asm__ volatile(
-       "stlrb %w[val], [%[ptr]]"
-       :
-       : [ptr] "r" (p), [val] "r" (v)
-       : "memory");
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
- {
-   template <typename T>
-   void operator()(T v, volatile T* p) const {
-     __asm__ volatile(
-       "stlrh %w[val], [%[ptr]]"
-       :
-       : [ptr] "r" (p), [val] "r" (v)
-       : "memory");
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
- {
-   template <typename T>
-   void operator()(T v, volatile T* p) const {
-     __asm__ volatile(
-       "stlr %w[val], [%[ptr]]"
-       :
-       : [ptr] "r" (p), [val] "r" (v)
-       : "memory");
-   }
- };
- 
- template<>
- struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
- {
-   template <typename T>
-   void operator()(T v, volatile T* p) const {
-     __asm__ volatile(
-       "stlr %[val], [%[ptr]]"
-       :
-       : [ptr] "r" (p), [val] "r" (v)
-       : "memory");
-   }
- };
- 
- #endif // AARCH64
- 
  #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
--- 108,113 ----
  inline void OrderAccess::storestore() { dmb_st(); }
  inline void OrderAccess::storeload()  { dmb_sy(); }
  inline void OrderAccess::release()    { dmb_sy(); }
  inline void OrderAccess::fence()      { dmb_sy(); }
  
  #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
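Note (editorial): the block removed above implemented AArch64 acquire loads and release stores with ldar*/stlr*, which fold the barrier into the memory access instead of pairing a plain access with a separate DMB. Roughly the same code generation is available from standard atomics; a sketch under that assumption, not part of this patch:

  #include <atomic>

  // On AArch64 an acquire load typically compiles to "ldar" and a
  // release store to "stlr", mirroring the removed
  // PlatformOrderedLoad / PlatformOrderedStore specializations.
  inline int load_acquire(const std::atomic<int>& x) {
    return x.load(std::memory_order_acquire);   // ldar w0, [x0]
  }

  inline void store_release(std::atomic<int>& x, int v) {
    x.store(v, std::memory_order_release);      // stlr w1, [x0]
  }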
