--- old/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp	2017-10-05 15:23:30.132769646 +0200
+++ new/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp	2017-10-05 15:23:29.892769654 +0200
@@ -78,16 +78,17 @@
 inline void   OrderAccess::release()   { inlasm_lwsync(); }
 inline void   OrderAccess::fence()     { inlasm_sync();   }
 
-template<> inline jbyte  OrderAccess::specialized_load_acquire (const volatile jbyte*  p) { register jbyte t  = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jint   OrderAccess::specialized_load_acquire (const volatile jint*   p) { register jint t   = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jlong  OrderAccess::specialized_load_acquire (const volatile jlong*  p) { register jlong t  = load(p); inlasm_acquire_reg(t); return t; }
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
 
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
 
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
--- old/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp	2017-10-05 15:23:30.916769619 +0200
+++ new/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp	2017-10-05 15:23:30.688769627 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
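Note: the aix_ppc hunk above is the pattern the whole patch applies. The per-type specialized_load_acquire overloads become a single functor, PlatformOrderedLoad, specialized on access size and an ordering tag, with a templated operator() doing the work. A minimal standalone model of that dispatch follows; it is not the HotSpot code (std::atomic_thread_fence stands in for the PPC inlasm_acquire_reg barrier, and the free load_acquire function stands in for the LoadImpl dispatcher that OrderAccess now borrows from Atomic):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Ordering tag mirroring the patch's ScopedFenceType value for acquiring loads.
enum ScopedFenceType { X_ACQUIRE };

// Primary template: one functor per (access size, ordering) pair.
template<std::size_t byte_size, ScopedFenceType type>
struct PlatformOrderedLoad;

// Partial specialization for any size with acquire semantics, as on PPC where
// one barrier sequence after the load covers every access width.
template<std::size_t byte_size>
struct PlatformOrderedLoad<byte_size, X_ACQUIRE> {
  template<typename T>
  T operator()(const volatile T* p) const {
    T t = *p;                                             // plain volatile load
    std::atomic_thread_fence(std::memory_order_acquire);  // acquire barrier after the load
    return t;
  }
};

// Front end: the functor is chosen from sizeof(T), so the jbyte/jint/jfloat/oop
// handling can live once in shared code rather than in each platform file.
template<typename T>
T load_acquire(const volatile T* p) {
  return PlatformOrderedLoad<sizeof(T), X_ACQUIRE>()(p);
}

int main() {
  volatile int32_t flag = 42;
  assert(load_acquire(&flag) == 42);
  return 0;
}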
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,46 +64,57 @@
 }
 
 template<>
-inline void OrderAccess::specialized_release_store_fence (volatile jbyte*  p, jbyte  v) {
-  __asm__ volatile (  "xchgb (%2),%0"
-                    : "=q" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) {
-  __asm__ volatile (  "xchgw (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
 template<>
-inline void OrderAccess::specialized_release_store_fence (volatile jint*   p, jint   v) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 
 #ifdef AMD64
 template<>
-inline void OrderAccess::specialized_release_store_fence (volatile jlong*  p, jlong  v) {
-  __asm__ volatile (  "xchgq (%2), %0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+  VALUE_OBJ_CLASS_SPEC
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
 #endif // AMD64
 
-template<>
-inline void OrderAccess::specialized_release_store_fence (volatile jfloat*  p, jfloat  v) {
-  release_store_fence((volatile jint*)p, jint_cast(v));
-}
-template<>
-inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) {
-  release_store_fence((volatile jlong*)p, jlong_cast(v));
-}
-
-#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
-
 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
--- old/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp	2017-10-05 15:23:31.772769589 +0200
+++ new/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp	2017-10-05 15:23:31.516769598 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
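Note: both x86 ports (bsd_x86 above and linux_x86 later in the patch) keep the xchg-based release_store_fence bodies unchanged; only the wrapper changes from a per-type function to a PlatformOrderedStore<N, RELEASE_X_FENCE> functor. A compilable sketch of the 4-byte case, assuming an x86 target and GCC-style inline asm; the asm text is taken from the hunk, the free-standing function name is made up:

#include <cassert>
#include <cstdint>

// On x86 an xchg with a memory operand is implicitly locked, so the single
// instruction provides both the store and the trailing full fence that
// release_store_fence() requires.
inline void release_store_fence_u32(volatile int32_t* p, int32_t v) {
  __asm__ volatile("xchgl (%2),%0"
                   : "=r"(v)
                   : "0"(v), "r"(p)
                   : "memory");
}

int main() {
  volatile int32_t word = 0;
  release_store_fence_u32(&word, 17);
  assert(word == 17);
  return 0;
}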
* @@ -74,6 +74,4 @@ inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } inline void OrderAccess::fence() { FULL_MEM_BARRIER; } -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP --- old/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp 2017-10-05 15:23:32.672769557 +0200 +++ new/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp 2017-10-05 15:23:32.400769567 +0200 @@ -50,93 +50,28 @@ FULL_MEM_BARRIER; } -inline jbyte OrderAccess::load_acquire(const volatile jbyte* p) -{ jbyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jshort OrderAccess::load_acquire(const volatile jshort* p) -{ jshort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jint OrderAccess::load_acquire(const volatile jint* p) -{ jint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jlong OrderAccess::load_acquire(const volatile jlong* p) -{ jlong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jubyte OrderAccess::load_acquire(const volatile jubyte* p) -{ jubyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jushort OrderAccess::load_acquire(const volatile jushort* p) -{ jushort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline juint OrderAccess::load_acquire(const volatile juint* p) -{ juint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline julong OrderAccess::load_acquire(const volatile julong* p) -{ julong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jfloat OrderAccess::load_acquire(const volatile jfloat* p) -{ jfloat data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline jdouble OrderAccess::load_acquire(const volatile jdouble* p) -{ jdouble data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) -{ intptr_t data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) -{ void* data; __atomic_load((void* const volatile *)p, &data, __ATOMIC_ACQUIRE); return data; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jint* p, jint v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile juint* p, juint v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile julong* p, julong v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) -{ __atomic_store(p, &v, __ATOMIC_RELEASE); } -inline void 
OrderAccess::release_store_ptr(volatile void* p, void* v) -{ __atomic_store((void* volatile *)p, &v, __ATOMIC_RELEASE); } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) -{ __atomic_store(p, &v, __ATOMIC_RELAXED); fence(); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_ptr(p, v); fence(); } +template +struct OrderAccess::PlatformOrderedLoad + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } +}; + +template +struct OrderAccess::PlatformOrderedStore + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); } +}; + +template +struct OrderAccess::PlatformOrderedStore + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { release_store(p, v); fence(); } +}; #endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP --- old/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp 2017-10-05 
15:23:33.556769527 +0200 +++ new/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp 2017-10-05 15:23:33.292769536 +0200 @@ -33,7 +33,6 @@ // - we define the high level barriers below and use the general // implementation in orderAccess.inline.hpp, with customizations // on AARCH64 via the specialized_* template functions -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 // Memory Ordering on ARM is weak. // @@ -131,91 +130,126 @@ #ifdef AARCH64 -template<> inline jbyte OrderAccess::specialized_load_acquire(const volatile jbyte* p) { - volatile jbyte result; - __asm__ volatile( - "ldarb %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; -} - -template<> inline jshort OrderAccess::specialized_load_acquire(const volatile jshort* p) { - volatile jshort result; - __asm__ volatile( - "ldarh %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; -} - -template<> inline jint OrderAccess::specialized_load_acquire(const volatile jint* p) { - volatile jint result; - __asm__ volatile( - "ldar %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; -} +template<> +struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE> + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldarb %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE> + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldarh %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE> + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldar %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE> + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldar %[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlrb %w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlrh %w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlr %w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlr %[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; -template<> inline jfloat OrderAccess::specialized_load_acquire(const volatile jfloat* p) { - return 
jfloat_cast(specialized_load_acquire((const volatile jint*)p)); -} - -// This is implicit as jlong and intptr_t are both "long int" -//template<> inline jlong OrderAccess::specialized_load_acquire(const volatile jlong* p) { -// return (volatile jlong)specialized_load_acquire((const volatile intptr_t*)p); -//} - -template<> inline intptr_t OrderAccess::specialized_load_acquire(const volatile intptr_t* p) { - volatile intptr_t result; - __asm__ volatile( - "ldar %[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; -} - -template<> inline jdouble OrderAccess::specialized_load_acquire(const volatile jdouble* p) { - return jdouble_cast(specialized_load_acquire((const volatile intptr_t*)p)); -} - - -template<> inline void OrderAccess::specialized_release_store(volatile jbyte* p, jbyte v) { - __asm__ volatile( - "stlrb %w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); -} - -template<> inline void OrderAccess::specialized_release_store(volatile jshort* p, jshort v) { - __asm__ volatile( - "stlrh %w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); -} - -template<> inline void OrderAccess::specialized_release_store(volatile jint* p, jint v) { - __asm__ volatile( - "stlr %w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); -} - -template<> inline void OrderAccess::specialized_release_store(volatile jlong* p, jlong v) { - __asm__ volatile( - "stlr %[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); -} #endif // AARCH64 #endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP --- old/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp 2017-10-05 15:23:34.468769495 +0200 +++ new/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp 2017-10-05 15:23:34.204769504 +0200 @@ -80,10 +80,14 @@ inline void OrderAccess::release() { inlasm_lwsync(); } inline void OrderAccess::fence() { inlasm_sync(); } -template<> inline jbyte OrderAccess::specialized_load_acquire (const volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; } -template<> inline jshort OrderAccess::specialized_load_acquire(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; } -template<> inline jint OrderAccess::specialized_load_acquire (const volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; } -template<> inline jlong OrderAccess::specialized_load_acquire (const volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; } + +template +struct OrderAccess::PlatformOrderedLoad + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } +}; #undef inlasm_sync #undef inlasm_lwsync @@ -91,6 +95,4 @@ #undef inlasm_isync #undef inlasm_acquire_reg -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP --- old/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp 2017-10-05 15:23:35.292769466 +0200 +++ new/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp 2017-10-05 15:23:35.068769474 +0200 @@ -74,10 +74,13 @@ inline void OrderAccess::release() { inlasm_zarch_release(); } inline void OrderAccess::fence() { inlasm_zarch_sync(); } -template<> inline jbyte OrderAccess::specialized_load_acquire (const volatile jbyte* p) { register jbyte t = *p; inlasm_zarch_acquire(); return t; } -template<> inline jshort 
OrderAccess::specialized_load_acquire(const volatile jshort* p) { register jshort t = *p; inlasm_zarch_acquire(); return t; } -template<> inline jint OrderAccess::specialized_load_acquire (const volatile jint* p) { register jint t = *p; inlasm_zarch_acquire(); return t; } -template<> inline jlong OrderAccess::specialized_load_acquire (const volatile jlong* p) { register jlong t = *p; inlasm_zarch_acquire(); return t; } +template +struct OrderAccess::PlatformOrderedLoad + VALUE_OBJ_CLASS_SPEC +{ + template + T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; } +}; #undef inlasm_compiler_barrier #undef inlasm_zarch_sync @@ -85,8 +88,4 @@ #undef inlasm_zarch_acquire #undef inlasm_zarch_fence -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP - - --- old/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp 2017-10-05 15:23:36.192769435 +0200 +++ new/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp 2017-10-05 15:23:35.928769444 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,4 @@ __asm__ volatile ("membar #StoreLoad" : : : "memory"); } -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP --- old/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp 2017-10-05 15:23:37.008769406 +0200 +++ new/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp 2017-10-05 15:23:36.748769415 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
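Note: in contrast to the ppc and s390 hunks just above, which keep an inline-asm barrier after a plain load, the linux_aarch64 file earlier in the patch folds its long list of per-type __atomic_load/__atomic_store overloads into size-generic functors built on the same compiler builtins. A self-contained sketch of that shape, assuming GCC or Clang; the free functions are made-up stand-ins for the OrderAccess entry points:

#include <cassert>
#include <cstdint>

// Let the compiler emit the acquiring load and releasing store for any
// trivially copyable T, instead of writing one asm block per access size.
template<typename T>
T load_acquire(const volatile T* p) {
  T data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE);
  return data;
}

template<typename T>
void release_store(volatile T* p, T v) {
  __atomic_store(p, &v, __ATOMIC_RELEASE);
}

int main() {
  volatile int32_t cell = 0;
  release_store(&cell, int32_t(7));
  assert(load_acquire(&cell) == 7);
  return 0;
}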
* * This code is free software; you can redistribute it and/or modify it @@ -60,46 +60,57 @@ } template<> -inline void OrderAccess::specialized_release_store_fence (volatile jbyte* p, jbyte v) { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); -} +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgb (%2),%0" + : "=q" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + template<> -inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) { - __asm__ volatile ( "xchgw (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -} +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgw (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + template<> -inline void OrderAccess::specialized_release_store_fence (volatile jint* p, jint v) { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -} +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgl (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; #ifdef AMD64 template<> -inline void OrderAccess::specialized_release_store_fence (volatile jlong* p, jlong v) { - __asm__ volatile ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -} +struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgq (%2), %0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; #endif // AMD64 -template<> -inline void OrderAccess::specialized_release_store_fence (volatile jfloat* p, jfloat v) { - release_store_fence((volatile jint*)p, jint_cast(v)); -} -template<> -inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) { - release_store_fence((volatile jlong*)p, jlong_cast(v)); -} - -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP --- old/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp 2017-10-05 15:23:37.912769375 +0200 +++ new/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp 2017-10-05 15:23:37.652769384 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -83,6 +83,4 @@ inline void OrderAccess::fence() { FULL_MEM_BARRIER; } -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP --- old/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp 2017-10-05 15:23:38.760769345 +0200 +++ new/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp 2017-10-05 15:23:38.528769353 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
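Note: the zero and sparc ports above carry no specializations at all; after this patch they simply fall through to the shared defaults in orderAccess.hpp, which bracket an ordinary access with the existing acquire/release/fence barriers (the ScopedFence prefix/postfix machinery in the real code). A rough standalone model, with std::atomic_thread_fence standing in for LIGHT_MEM_BARRIER/FULL_MEM_BARRIER:

#include <atomic>
#include <cassert>
#include <cstdint>

inline void acquire() { std::atomic_thread_fence(std::memory_order_acquire); }
inline void release() { std::atomic_thread_fence(std::memory_order_release); }
inline void fence()   { std::atomic_thread_fence(std::memory_order_seq_cst); }

// Default "ordered" accesses: a plain access plus the barriers around it.
template<typename T>
T ordered_load_acquire(const volatile T* p) {
  T t = *p;        // plain volatile load
  acquire();       // postfix barrier, as ScopedFence<X_ACQUIRE> does
  return t;
}

template<typename T>
void ordered_release_store_fence(volatile T* p, T v) {
  release();       // prefix barrier, as ScopedFence<RELEASE_X_FENCE> does
  *p = v;
  fence();         // postfix full barrier
}

int main() {
  volatile int32_t x = 0;
  ordered_release_store_fence(&x, 5);
  assert(ordered_load_acquire(&x) == 5);
  return 0;
}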
* * This code is free software; you can redistribute it and/or modify it @@ -52,6 +52,4 @@ __asm__ volatile ("membar #StoreLoad" : : : "memory"); } -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP --- old/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp 2017-10-05 15:23:39.656769314 +0200 +++ new/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp 2017-10-05 15:23:39.396769323 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,4 @@ compiler_barrier(); } -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP --- old/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp 2017-10-05 15:23:40.552769283 +0200 +++ new/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp 2017-10-05 15:23:40.296769292 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,42 +74,46 @@ #ifndef AMD64 template<> -inline void OrderAccess::specialized_release_store_fence (volatile jbyte* p, jbyte v) { - __asm { - mov edx, p; - mov al, v; - xchg al, byte ptr [edx]; +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov al, v; + xchg al, byte ptr [edx]; + } } -} +}; template<> -inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) { - __asm { - mov edx, p; - mov ax, v; - xchg ax, word ptr [edx]; +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov ax, v; + xchg ax, word ptr [edx]; + } } -} +}; template<> -inline void OrderAccess::specialized_release_store_fence (volatile jint* p, jint v) { - __asm { - mov edx, p; - mov eax, v; - xchg eax, dword ptr [edx]; +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> + VALUE_OBJ_CLASS_SPEC +{ + template + void operator()(T v, volatile T* p) const { + __asm { + mov edx, p; + mov eax, v; + xchg eax, dword ptr [edx]; + } } -} +}; #endif // AMD64 -template<> -inline void OrderAccess::specialized_release_store_fence(volatile jfloat* p, jfloat v) { - release_store_fence((volatile jint*)p, jint_cast(v)); -} -template<> -inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) { - release_store_fence((volatile jlong*)p, jlong_cast(v)); -} - -#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 - #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP --- old/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp 2017-10-05 15:23:41.388769254 +0200 +++ new/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp 2017-10-05 15:23:41.128769263 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. 
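Note: the shared-code hunks that follow (cardTableModRefBS.inline.hpp, oop.inline.hpp, mutex.cpp) add explicit jbyte/jboolean conversions at call sites. The old fixed overload set converted an int-typed expression implicitly; the new templated release_store deduces the value type separately from the destination type and checks them against each other, so the caller has to spell out the intended width. An illustrative sketch of the effect, with a simple size check standing in for the real dispatcher's type checking in Atomic:

#include <cstdint>

typedef int8_t jbyte;   // stand-in for the HotSpot typedef

// Shape of the new API: value type T and destination type D are deduced
// independently, so nothing narrows silently behind the caller's back.
template<typename T, typename D>
void release_store(volatile D* p, T v) {
  static_assert(sizeof(T) == sizeof(D), "value and destination must have the same size");
  *p = v;   // memory ordering elided in this sketch
}

int main() {
  volatile jbyte dirty = 1;
  // release_store(&dirty, 0);       // deduces T = int, D = jbyte; rejected by the check
  release_store(&dirty, jbyte(0));   // explicit jbyte value, as mutex.cpp now does
  return 0;
}

This is why the hunks below write jbyte(dirty_card), jboolean(contents & 1) and jbyte(0) where a bare expression used to suffice.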
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ jbyte* byte = byte_for((void*)field); if (release) { // Perform a releasing store if requested. - OrderAccess::release_store((volatile jbyte*) byte, dirty_card); + OrderAccess::release_store((volatile jbyte*) byte, jbyte(dirty_card)); } else { *byte = dirty_card; } --- old/src/hotspot/share/metaprogramming/primitiveConversions.hpp 2017-10-05 15:23:42.208769225 +0200 +++ new/src/hotspot/share/metaprogramming/primitiveConversions.hpp 2017-10-05 15:23:41.976769233 +0200 @@ -167,4 +167,24 @@ return Cast()(x); } +// jfloat and jdouble translation to integral types + +template<> +struct PrimitiveConversions::Translate : public TrueType { + typedef double Value; + typedef int64_t Decayed; + + static Decayed decay(Value x) { return PrimitiveConversions::cast(x); } + static Value recover(Decayed x) { return PrimitiveConversions::cast(x); } +}; + +template<> +struct PrimitiveConversions::Translate : public TrueType { + typedef float Value; + typedef int32_t Decayed; + + static Decayed decay(Value x) { return PrimitiveConversions::cast(x); } + static Value recover(Decayed x) { return PrimitiveConversions::cast(x); } +}; + #endif // SHARE_VM_METAPROGRAMMING_PRIMITIVECONVERSIONS_HPP --- old/src/hotspot/share/oops/oop.inline.hpp 2017-10-05 15:23:43.032769197 +0200 +++ new/src/hotspot/share/oops/oop.inline.hpp 2017-10-05 15:23:42.776769206 +0200 @@ -501,7 +501,7 @@ void oopDesc::release_char_field_put(int offset, jchar contents) { OrderAccess::release_store(char_field_addr(offset), contents); } jboolean oopDesc::bool_field_acquire(int offset) const { return OrderAccess::load_acquire(bool_field_addr(offset)); } -void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), (contents & 1)); } +void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), jboolean(contents & 1)); } jint oopDesc::int_field_acquire(int offset) const { return OrderAccess::load_acquire(int_field_addr(offset)); } void oopDesc::release_int_field_put(int offset, jint contents) { OrderAccess::release_store(int_field_addr(offset), contents); } --- old/src/hotspot/share/oops/oopsHierarchy.hpp 2017-10-05 15:23:43.928769165 +0200 +++ new/src/hotspot/share/oops/oopsHierarchy.hpp 2017-10-05 15:23:43.700769173 +0200 @@ -177,6 +177,15 @@ (void)const_cast(oop::operator=(o)); \ return *this; \ } \ + }; \ + \ + template<> \ + struct PrimitiveConversions::Translate : public TrueType { \ + typedef type##Oop Value; \ + typedef type##OopDesc* Decayed; \ + \ + static Decayed decay(Value x) { return (type##OopDesc*)x.obj(); } \ + static Value recover(Decayed x) { return type##Oop(x); } \ }; DEF_OOP(instance); --- old/src/hotspot/share/runtime/atomic.hpp 2017-10-05 15:23:44.748769137 +0200 +++ new/src/hotspot/share/runtime/atomic.hpp 2017-10-05 15:23:44.520769145 +0200 @@ -44,7 +44,7 @@ }; class Atomic : AllStatic { - public: +public: // Atomic operations on jlong types are not available on all 32-bit // platforms. If atomic ops on jlongs are defined here they must only // be used from code that verifies they are available at runtime and @@ -175,6 +175,7 @@ // that is needed here. template struct IsPointerConvertible; +protected: // Dispatch handler for store. 
Provides type-based validity // checking and limited conversions around calls to the platform- // specific implementation layer provided by PlatformOp. @@ -226,6 +227,7 @@ // requires more for e.g. 64 bit loads, a specialization is required template struct PlatformLoad; +private: // Dispatch handler for add. Provides type-based validity checking // and limited conversions around calls to the platform-specific // implementation layer provided by PlatformAdd. --- old/src/hotspot/share/runtime/mutex.cpp 2017-10-05 15:23:45.640769106 +0200 +++ new/src/hotspot/share/runtime/mutex.cpp 2017-10-05 15:23:45.380769115 +0200 @@ -526,7 +526,7 @@ // Note that the OrderAccess::storeload() fence that appears after unlock store // provides for progress conditions and succession and is _not related to exclusion // safety or lock release consistency. - OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock + OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock OrderAccess::storeload(); ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL --- old/src/hotspot/share/runtime/orderAccess.hpp 2017-10-05 15:23:46.508769076 +0200 +++ new/src/hotspot/share/runtime/orderAccess.hpp 2017-10-05 15:23:46.248769085 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_RUNTIME_ORDERACCESS_HPP #include "memory/allocation.hpp" +#include "runtime/atomic.hpp" // Memory Access Ordering Model // @@ -252,7 +253,7 @@ void postfix() { ScopedFenceGeneral::postfix(); } }; -class OrderAccess : AllStatic { +class OrderAccess : private Atomic { public: // barriers static void loadload(); @@ -264,44 +265,20 @@ static void release(); static void fence(); - static jbyte load_acquire(const volatile jbyte* p); - static jshort load_acquire(const volatile jshort* p); - static jint load_acquire(const volatile jint* p); - static jlong load_acquire(const volatile jlong* p); - static jubyte load_acquire(const volatile jubyte* p); - static jushort load_acquire(const volatile jushort* p); - static juint load_acquire(const volatile juint* p); - static julong load_acquire(const volatile julong* p); - static jfloat load_acquire(const volatile jfloat* p); - static jdouble load_acquire(const volatile jdouble* p); + template + static T load_acquire(const volatile T* p); static intptr_t load_ptr_acquire(const volatile intptr_t* p); static void* load_ptr_acquire(const volatile void* p); - static void release_store(volatile jbyte* p, jbyte v); - static void release_store(volatile jshort* p, jshort v); - static void release_store(volatile jint* p, jint v); - static void release_store(volatile jlong* p, jlong v); - static void release_store(volatile jubyte* p, jubyte v); - static void release_store(volatile jushort* p, jushort v); - static void release_store(volatile juint* p, juint v); - static void release_store(volatile julong* p, julong v); - static void release_store(volatile jfloat* p, jfloat v); - static void release_store(volatile jdouble* p, jdouble v); + template + static void release_store(volatile D* p, T v); static void release_store_ptr(volatile intptr_t* p, intptr_t v); static void release_store_ptr(volatile void* p, void* v); - static void release_store_fence(volatile jbyte* p, jbyte v); - static void release_store_fence(volatile jshort* p, jshort v); - static void release_store_fence(volatile jint* p, jint v); - static void release_store_fence(volatile jlong* p, jlong v); - static void release_store_fence(volatile jubyte* p, jubyte v); - static void 
release_store_fence(volatile jushort* p, jushort v); - static void release_store_fence(volatile juint* p, juint v); - static void release_store_fence(volatile julong* p, julong v); - static void release_store_fence(volatile jfloat* p, jfloat v); - static void release_store_fence(volatile jdouble* p, jdouble v); + template + static void release_store_fence(volatile D* p, T v); static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v); static void release_store_ptr_fence(volatile void* p, void* v); @@ -313,45 +290,34 @@ static void StubRoutines_fence(); // Give platforms a variation point to specialize. - template static T specialized_load_acquire (const volatile T* p); - template static void specialized_release_store (volatile T* p, T v); - template static void specialized_release_store_fence(volatile T* p, T v); + template struct PlatformOrderedStore; + template struct PlatformOrderedLoad; template static void ordered_store(volatile FieldType* p, FieldType v); template static FieldType ordered_load(const volatile FieldType* p); +}; - static void store(volatile jbyte* p, jbyte v); - static void store(volatile jshort* p, jshort v); - static void store(volatile jint* p, jint v); - static void store(volatile jlong* p, jlong v); - static void store(volatile jdouble* p, jdouble v); - static void store(volatile jfloat* p, jfloat v); - - static jbyte load(const volatile jbyte* p); - static jshort load(const volatile jshort* p); - static jint load(const volatile jint* p); - static jlong load(const volatile jlong* p); - static jdouble load(const volatile jdouble* p); - static jfloat load(const volatile jfloat* p); - - // The following store_fence methods are deprecated and will be removed - // when all repos conform to the new generalized OrderAccess. - static void store_fence(jbyte* p, jbyte v); - static void store_fence(jshort* p, jshort v); - static void store_fence(jint* p, jint v); - static void store_fence(jlong* p, jlong v); - static void store_fence(jubyte* p, jubyte v); - static void store_fence(jushort* p, jushort v); - static void store_fence(juint* p, juint v); - static void store_fence(julong* p, julong v); - static void store_fence(jfloat* p, jfloat v); - static void store_fence(jdouble* p, jdouble v); +// The following methods can be specialized using simple template specialization +// in the platform specific files for optimization purposes. Otherwise the +// generalized variant is used. 
+ +template +struct OrderAccess::PlatformOrderedStore VALUE_OBJ_CLASS_SPEC { + template + void operator()(T v, volatile T* p) const { + ordered_store(p, v); + } +}; - static void store_ptr_fence(intptr_t* p, intptr_t v); - static void store_ptr_fence(void** p, void* v); +template +struct OrderAccess::PlatformOrderedLoad VALUE_OBJ_CLASS_SPEC { + template + T operator()(const volatile T* p) const { + return ordered_load(p); + } }; #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP --- old/src/hotspot/share/runtime/orderAccess.inline.hpp 2017-10-05 15:23:47.336769047 +0200 +++ new/src/hotspot/share/runtime/orderAccess.inline.hpp 2017-10-05 15:23:47.088769055 +0200 @@ -26,14 +26,11 @@ #ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP #define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP -#include "runtime/atomic.hpp" #include "runtime/orderAccess.hpp" #include "utilities/macros.hpp" #include OS_CPU_HEADER_INLINE(orderAccess) -#ifdef VM_HAS_GENERALIZED_ORDER_ACCESS - template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::acquire(); } template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } @@ -43,80 +40,42 @@ template inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) { ScopedFence f((void*)p); - store(p, v); + Atomic::store(v, p); } template inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) { ScopedFence f((void*)p); - return load(p); + return Atomic::load(p); +} + +template +inline T OrderAccess::load_acquire(const volatile T* p) { + return LoadImpl >()(p); } -inline jbyte OrderAccess::load_acquire(const volatile jbyte* p) { return specialized_load_acquire(p); } -inline jshort OrderAccess::load_acquire(const volatile jshort* p) { return specialized_load_acquire(p); } -inline jint OrderAccess::load_acquire(const volatile jint* p) { return specialized_load_acquire(p); } -inline jlong OrderAccess::load_acquire(const volatile jlong* p) { return specialized_load_acquire(p); } -inline jfloat OrderAccess::load_acquire(const volatile jfloat* p) { return specialized_load_acquire(p); } -inline jdouble OrderAccess::load_acquire(const volatile jdouble* p) { return specialized_load_acquire(p); } -inline jubyte OrderAccess::load_acquire(const volatile jubyte* p) { return (jubyte) specialized_load_acquire((const volatile jbyte*)p); } -inline jushort OrderAccess::load_acquire(const volatile jushort* p) { return (jushort)specialized_load_acquire((const volatile jshort*)p); } -inline juint OrderAccess::load_acquire(const volatile juint* p) { return (juint) specialized_load_acquire((const volatile jint*)p); } -inline julong OrderAccess::load_acquire(const volatile julong* p) { return (julong) specialized_load_acquire((const volatile jlong*)p); } - -inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)specialized_load_acquire((const volatile intptr_t*)p); } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store(volatile jint* p, jint v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { specialized_release_store(p, v); } -inline void 
OrderAccess::release_store(volatile jfloat* p, jfloat v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { specialized_release_store((volatile jbyte*) p, (jbyte) v); } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { specialized_release_store((volatile jshort*)p, (jshort)v); } -inline void OrderAccess::release_store(volatile juint* p, juint v) { specialized_release_store((volatile jint*) p, (jint) v); } -inline void OrderAccess::release_store(volatile julong* p, julong v) { specialized_release_store((volatile jlong*) p, (jlong) v); } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { specialized_release_store(p, v); } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { specialized_release_store((volatile intptr_t*)p, (intptr_t)v); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { specialized_release_store_fence((volatile jbyte*) p, (jbyte) v); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { specialized_release_store_fence((volatile jshort*)p, (jshort)v); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { specialized_release_store_fence((volatile jint*) p, (jint) v); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { specialized_release_store_fence((volatile jlong*) p, (jlong) v); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { specialized_release_store_fence(p, v); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { specialized_release_store_fence((volatile intptr_t*)p, (intptr_t)v); } - -// The following methods can be specialized using simple template specialization -// in the platform specific files for optimization purposes. Otherwise the -// generalized variant is used. -template inline T OrderAccess::specialized_load_acquire (const volatile T* p) { return ordered_load(p); } -template inline void OrderAccess::specialized_release_store (volatile T* p, T v) { ordered_store(p, v); } -template inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v) { ordered_store(p, v); } - -// Generalized atomic volatile accesses valid in OrderAccess -// All other types can be expressed in terms of these. 
-inline void OrderAccess::store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); } -inline void OrderAccess::store(volatile jfloat* p, jfloat v) { *p = v; } - -inline jbyte OrderAccess::load(const volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load(const volatile jshort* p) { return *p; } -inline jint OrderAccess::load(const volatile jint* p) { return *p; } -inline jlong OrderAccess::load(const volatile jlong* p) { return Atomic::load(p); } -inline jdouble OrderAccess::load(const volatile jdouble* p) { return jdouble_cast(Atomic::load((const volatile jlong*)p)); } -inline jfloat OrderAccess::load(const volatile jfloat* p) { return *p; } +inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) { + return load_acquire(p); +} + +inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { + return load_acquire(static_cast(p)); +} + +template +inline void OrderAccess::release_store(volatile D* p, T v) { + StoreImpl >()(v, p); +} + +inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release_store(p, v); } +inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { release_store(static_cast(p), v); } + +template +inline void OrderAccess::release_store_fence(volatile D* p, T v) { + StoreImpl >()(v, p); +} -#endif // VM_HAS_GENERALIZED_ORDER_ACCESS +inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_fence(p, v); } +inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_fence(static_cast(p), v); } #endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
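Note: the primitiveConversions.hpp and oopsHierarchy.hpp hunks above make jdouble, jfloat and the checked oop wrapper types translatable to integral values of the same size. That is what lets the generalized load_acquire/release_store paths drop the dedicated jfloat/jdouble overloads: the value is decayed to its bit pattern, the ordered access is performed on the integral type, and the value is recovered afterwards. A minimal sketch of the round trip, using memcpy in place of PrimitiveConversions::cast:

#include <cassert>
#include <cstdint>
#include <cstring>

// Decay a float to its 32-bit representation and recover it again.
inline int32_t decay(float x)     { int32_t bits; std::memcpy(&bits, &x, sizeof(bits)); return bits; }
inline float   recover(int32_t b) { float x;      std::memcpy(&x, &b, sizeof(x));       return x;    }

int main() {
  float f = 3.5f;
  int32_t bits = decay(f);         // the ordered store/load would act on this value
  assert(recover(bits) == 3.5f);   // lossless round trip
  return 0;
}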