--- old/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-08-20 02:08:22.478899034 -0400
+++ new/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-08-20 02:08:22.362893083 -0400
@@ -93,9 +93,21 @@
 #define strasm_nobarrier                ""
 #define strasm_nobarrier_clobber_memory ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
-  unsigned int result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -108,13 +120,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -127,11 +143,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
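The shape above recurs in every port, so it is worth unpacking once: PlatformAdd<n> derives from AddAndFetch<PlatformAdd<n> > (the curiously recurring template pattern), and the base class's operator() statically forwards to the derived class's add_and_fetch. A minimal standalone sketch of that dispatch follows; the names are illustrative, not HotSpot's, and a plain read-modify-write stands in for the real atomic asm.

#include <iostream>

template<typename Derived>
struct AddAndFetchBase {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    // Static (compile-time) dispatch to the derived support function.
    return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
  }
};

struct DemoPlatformAdd : AddAndFetchBase<DemoPlatformAdd> {
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const {
    D new_value = *dest + add_value;  // stand-in for the real asm/intrinsic
    *dest = new_value;
    return new_value;                 // add-and-fetch returns the NEW value
  }
};

int main() {
  volatile int counter = 40;
  std::cout << DemoPlatformAdd()(2, &counter) << '\n';  // prints 42
}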
--- old/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-08-20 02:08:23.018926729 -0400
+++ new/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-08-20 02:08:22.902920782 -0400
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- old/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-08-20 02:08:23.554954228 -0400
+++ new/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-08-20 02:08:23.438948273 -0400
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory. */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory. */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
 
@@ -173,32 +173,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  return __sync_add_and_fetch(dest, add_value);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
--- old/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-08-20 02:08:24.090981714 -0400
+++ new/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-08-20 02:08:23.974975764 -0400
@@ -47,10 +47,15 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest)
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-  return __sync_add_and_fetch(dest, add_value);
-}
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const {
+    return __sync_add_and_fetch(dest, add_value);
+  }
+};
 
 inline void Atomic::inc(volatile jint* dest)
 {
@@ -105,16 +110,6 @@
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
-  return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest)
 {
   add_ptr(1, dest);
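The two support-function flavors differ only in which value the primitive reports. x86's lock xadd returns the OLD value, so FetchAndAdd's wrapper must add add_value once more; GCC's __sync_add_and_fetch, used by the zero and aarch64 ports, already returns the NEW value. A compilable sketch of the distinction (requires GCC or Clang for the __sync builtins):

#include <cassert>

int add_via_fetch_and_add(volatile int* dest, int add_value) {
  int old_value = __sync_fetch_and_add(dest, add_value);  // returns old
  return old_value + add_value;                           // correct to new
}

int add_via_add_and_fetch(volatile int* dest, int add_value) {
  return __sync_add_and_fetch(dest, add_value);           // returns new
}

int main() {
  volatile int a = 5, b = 5;
  assert(add_via_fetch_and_add(&a, 3) == 8);
  assert(add_via_add_and_fetch(&b, 3) == 8);
}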
--- old/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-08-20 02:08:24.631009417 -0400
+++ new/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-08-20 02:08:24.515003463 -0400
@@ -91,9 +91,21 @@
 //
 // For ARMv7 we add explicit barriers in the stubs.
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 #ifdef AARCH64
-  jint val;
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -106,7 +118,7 @@
     : "memory");
   return val;
 #else
-  return (*os::atomic_add_func)(add_value, dest);
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 #endif
 }
 
@@ -118,9 +130,13 @@
   Atomic::add(-1, (volatile jint *)dest);
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 #ifdef AARCH64
-  intptr_t val;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -132,14 +148,8 @@
     : [add_val] "r" (add_value), [dest] "r" (dest)
     : "memory");
   return val;
-#else
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-#endif
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
 }
+#endif // AARCH64
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   Atomic::add_ptr(1, dest);
--- old/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-08-20 02:08:25.175037313 -0400
+++ new/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-08-20 02:08:25.059031369 -0400
@@ -93,9 +93,21 @@
 #define strasm_nobarrier                ""
 #define strasm_nobarrier_clobber_memory ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
-  unsigned int result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -108,13 +120,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -127,11 +143,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
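The linux_arm 32-bit path above routes through add_using_helper<jint>, whose definition appears later in atomic.hpp: it casts the generic arguments down to the out-of-line helper's fixed signature, invokes it, and casts the result back. A standalone sketch of that translation, with illustrative names and static_cast/reinterpret_cast standing in for PrimitiveConversions::cast (requires GCC or Clang for the builtin used as the stub):

#include <cassert>
#include <stdint.h>

typedef int32_t jint_t;  // stand-in for HotSpot's jint

// An out-of-line, non-template helper, as a platform stub might provide.
static jint_t demo_atomic_add_func(jint_t add_value, volatile jint_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

template<typename Type, typename Fn, typename I, typename D>
D demo_add_using_helper(Fn fn, I add_value, D volatile* dest) {
  // Translate generic I/D to the helper's Type, call, translate back.
  return static_cast<D>(fn(static_cast<Type>(add_value),
                           reinterpret_cast<Type volatile*>(dest)));
}

int main() {
  volatile int32_t counter = 1;
  assert(demo_add_using_helper<jint_t>(demo_atomic_add_func, 2, &counter) == 3);
}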
--- old/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-08-20 02:08:25.715065011 -0400
+++ new/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-08-20 02:08:25.599059067 -0400
@@ -82,8 +82,21 @@
 // The return value of the method is the value that was successfully stored. At the
 // time the caller receives back control, the value in memory may have changed already.
 
-inline jint Atomic::add(jint inc, volatile jint*dest) {
-  unsigned int old, upd;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -124,12 +137,17 @@
     );
   }
 
-  return (jint)upd;
+  return upd;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
-  unsigned long old, upd;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -170,11 +188,7 @@
     );
   }
 
-  return (intptr_t)upd;
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return upd;
 }
--- old/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-08-20 02:08:26.263093118 -0400
+++ new/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-08-20 02:08:26.143086963 -0400
@@ -51,8 +51,21 @@
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  intptr_t rv;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ld     [%2], %%o2\n\t"
@@ -68,8 +81,13 @@
   return rv;
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t rv;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
@@ -85,10 +103,6 @@
   return rv;
 }
 
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
 inline jint  Atomic::xchg (jint     exchange_value, volatile jint*     dest) {
   intptr_t rv = exchange_value;
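The STATIC_ASSERT checks guarding each specialization (the originals misspelled a few of them as STATIC_CAST, which is a different macro and would not compile) are HotSpot's C++03-era compile-time assertion; the code base could not yet rely on C++11 static_assert. A sketch of one common way such a macro is built, with hypothetical names since HotSpot's actual definition in utilities/debug.hpp differs in detail:

template<bool> struct StaticAssertImpl;
template<> struct StaticAssertImpl<true> { enum { value = 1 }; };

// A false condition selects the undefined primary template and the
// translation unit fails to compile; a true condition is a no-op typedef.
#define DEMO_STATIC_ASSERT(Cond) \
  typedef char demo_static_assert_type[StaticAssertImpl<(bool)(Cond)>::value]

template<typename I, typename D>
D checked_add(I add_value, D volatile* dest) {
  DEMO_STATIC_ASSERT(sizeof(I) == sizeof(D));  // compiles only if sizes match
  D new_value = *dest + add_value;             // non-atomic stand-in
  *dest = new_value;
  return new_value;
}

int main() {
  volatile long v = 1;
  return (int)checked_add(2L, &v) - 3;  // returns 0
}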
--- old/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-08-20 02:08:26.795120405 -0400
+++ new/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-08-20 02:08:26.679114455 -0400
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ ("lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- old/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-08-20 02:08:27.327147696 -0400
+++ new/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-08-20 02:08:27.211141743 -0400
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory. */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory. */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
@@ -167,32 +167,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  return __sync_add_and_fetch(dest, add_value);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
--- old/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-08-20 02:08:27.863175188 -0400
+++ new/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-08-20 02:08:27.747169234 -0400
@@ -62,22 +62,21 @@
 
 extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
 extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
 
-extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
-extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
-
-
-inline jint     Atomic::add    (jint    add_value, volatile jint*     dest) {
-  return _Atomic_add32(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return _Atomic_add64(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
+// Implement ADD using a CAS loop.
+template<size_t byte_size>
+struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  inline D operator()(I add_value, D volatile* dest) const {
+    D old_value = *dest;
+    while (true) {
+      D new_value = old_value + add_value;
+      D result = cmpxchg(new_value, dest, old_value);
+      if (result == old_value) break;
+      old_value = result;
+    }
+    return old_value + add_value;
+  }
+};
 
 inline jint     Atomic::xchg (jint     exchange_value, volatile jint*     dest) {
   return _Atomic_swap32(exchange_value, dest);
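The solaris_sparc port drops its hand-written .il stubs entirely and builds add from cmpxchg: retry until the compare-and-swap installs our new value, then report that value. The same loop, written standalone with std::atomic::compare_exchange_strong standing in for Atomic::cmpxchg:

#include <atomic>
#include <cassert>

template<typename D, typename I>
D cas_loop_add(std::atomic<D>& dest, I add_value) {
  D old_value = dest.load();
  while (true) {
    D new_value = old_value + add_value;
    // On failure, compare_exchange writes the current value back into
    // old_value, exactly like the cmpxchg result feeding the next try.
    if (dest.compare_exchange_strong(old_value, new_value)) break;
  }
  return old_value + add_value;  // the updated value this call installed
}

int main() {
  std::atomic<int> v(10);
  assert(cas_loop_add(v, 5) == 15);
}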
--- old/src/os_cpu/solaris_sparc/vm/solaris_sparc.il 2017-08-20 02:08:28.407203091 -0400
+++ new/src/os_cpu/solaris_sparc/vm/solaris_sparc.il 2017-08-20 02:08:28.291197141 -0400
@@ -90,58 +90,6 @@
   .nonvolatile
   .end
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-  .inline _Atomic_add32, 2
-  .volatile
-  2:
-  ld       [%o1], %o2
-  add      %o0, %o2, %o3
-  cas      [%o1], %o2, %o3
-  cmp      %o2, %o3
-  bne      2b
-   nop
-  add      %o0, %o2, %o0
-  .nonvolatile
-  .end
-
-
-  // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-  //
-  // 64-bit
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-  .inline _Atomic_add64, 2
-  .volatile
-  3:
-  ldx      [%o1], %o2
-  add      %o0, %o2, %o3
-  casx     [%o1], %o2, %o3
-  cmp      %o2, %o3
-  bne      %xcc, 3b
-   nop
-  add      %o0, %o2, %o0
-  .nonvolatile
-  .end
-
-
   // Support for void Prefetch::read(void *loc, intx interval)
   //
   // Prefetch for several reads.
--- old/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-08-20 02:08:28.935230169 -0400
+++ new/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-08-20 02:08:28.819224216 -0400
@@ -51,6 +51,8 @@
 
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest);
+  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
   jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
                              jbyte compare_value);
@@ -60,8 +62,34 @@
                              jlong compare_value);
 }
 
-inline jint Atomic::add (jint add_value, volatile jint* dest) {
-  return _Atomic_add(add_value, dest);
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
+                reinterpret_cast<jint volatile*>(dest)));
+}
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
+                     reinterpret_cast<jlong volatile*>(dest)));
 }
 
 inline jint  Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
@@ -115,17 +143,8 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
 extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
 }
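The solaris_x86 specializations call PrimitiveConversions::cast directly instead of add_using_helper. That cast is a value-preserving conversion between types of identical size, which is why each specialization is guarded by STATIC_ASSERTs on sizeof. A sketch of the idea, using memcpy as the portable equivalent (HotSpot's actual implementation differs):

#include <cassert>
#include <cstring>
#include <stdint.h>

template<typename To, typename From>
To demo_cast(From x) {
  // Legal only between same-size types; the real PrimitiveConversions
  // enforces this statically, this demo checks at runtime.
  assert(sizeof(To) == sizeof(From));
  To result;
  std::memcpy(&result, &x, sizeof(To));
  return result;
}

int main() {
  uint32_t u = demo_cast<uint32_t>(int32_t(-1));
  assert(u == 0xFFFFFFFFu);  // bit pattern preserved, not value-converted
}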
--- old/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-08-20 02:08:29.467257454 -0400
+++ new/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-08-20 02:08:29.351251502 -0400
@@ -57,20 +57,28 @@
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 }
 
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -130,7 +138,11 @@
 
 #else // !AMD64
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;
     mov eax, add_value;
@@ -140,14 +152,6 @@
   }
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
 inline void Atomic::inc    (volatile jint*     dest) {
   // alternative for InterlockedIncrement
   __asm {
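With every platform now supplying PlatformAdd, all of the old add/add_ptr overloads collapse into one template entry point; add_ptr survives only as a thin wrapper. A usage sketch of what call sites get, with a non-atomic stand-in for the platform layer; note that pointer destinations advance by elements, like ordinary pointer arithmetic:

#include <cassert>
#include <cstddef>

struct DemoAtomic {
  template<typename I, typename D>
  static D add(I add_value, D volatile* dest) {
    D new_value = *dest + add_value;  // stand-in for the real atomic
    *dest = new_value;
    return new_value;                 // returns the updated value
  }
};

int main() {
  volatile size_t counter = 0;
  assert(DemoAtomic::add(size_t(1), &counter) == 1);  // integral add

  int array[4] = {0, 1, 2, 3};
  int* volatile cursor = array;
  // Pointer add scales by sizeof(int): +2 elements, not +2 bytes.
  assert(*DemoAtomic::add(2, &cursor) == 2);
}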
--- old/src/share/vm/gc/g1/g1CardLiveData.cpp 2017-08-20 02:08:30.007285150 -0400
+++ new/src/share/vm/gc/g1/g1CardLiveData.cpp 2017-08-20 02:08:29.887278996 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -409,7 +409,7 @@
 
   virtual void work(uint worker_id) {
     while (true) {
-      size_t to_process = Atomic::add(1, &_cur_chunk) - 1;
+      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
       if (to_process >= _num_chunks) {
         break;
       }
--- old/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2017-08-20 02:08:30.551313057 -0400
+++ new/src/share/vm/gc/g1/g1ConcurrentMark.cpp 2017-08-20 02:08:30.431306902 -0400
@@ -200,7 +200,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
+  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
--- old/src/share/vm/gc/g1/g1HotCardCache.cpp 2017-08-20 02:08:31.115341978 -0400
+++ new/src/share/vm/gc/g1/g1HotCardCache.cpp 2017-08-20 02:08:30.999336034 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
+  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
   jbyte* current_ptr = _hot_cache[masked_index];
--- old/src/share/vm/gc/g1/g1HotCardCache.hpp 2017-08-20 02:08:31.643369067 -0400
+++ new/src/share/vm/gc/g1/g1HotCardCache.hpp 2017-08-20 02:08:31.527363112 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@
 
   size_t _hot_cache_size;
 
-  int _hot_cache_par_chunk_size;
+  size_t _hot_cache_par_chunk_size;
 
   // Avoids false sharing when concurrently updating _hot_cache_idx or
   // _hot_cache_par_claimed_idx. These are never updated at the same time
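All of the G1 call sites above use the same claim idiom: the updated counter value minus one is an index this worker owns exclusively. The literals change from 1 to 1u because the templated dispatch now requires the addend's signedness to match the size_t destination. The idiom standalone, with std::atomic standing in for HotSpot's Atomic:

#include <atomic>
#include <cstdio>

static std::atomic<size_t> g_cur_chunk(0);
static const size_t num_chunks = 8;

void worker() {
  while (true) {
    // fetch_add returns the old value, i.e. Atomic::add(1u, ...) - 1.
    size_t to_process = g_cur_chunk.fetch_add(1);
    if (to_process >= num_chunks) break;  // nothing left to claim
    std::printf("claimed chunk %zu\n", to_process);
  }
}

int main() { worker(); }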
--- old/src/share/vm/gc/g1/g1RemSet.cpp 2017-08-20 02:08:32.175396349 -0400
+++ new/src/share/vm/gc/g1/g1RemSet.cpp 2017-08-20 02:08:32.059390398 -0400
@@ -243,7 +243,7 @@
 
     bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
     if (marked_as_dirty) {
-      size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
+      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
       _dirty_region_buffer[allocated] = region;
     }
   }
--- old/src/share/vm/oops/symbol.cpp 2017-08-20 02:08:32.711423845 -0400
+++ new/src/share/vm/oops/symbol.cpp 2017-08-20 02:08:32.591417690 -0400
@@ -219,7 +219,7 @@
 
 void Symbol::decrement_refcount() {
   if (_refcount >= 0) { // not a permanent symbol
-    jshort new_value = Atomic::add(-1, &_refcount);
+    short new_value = Atomic::add(short(-1), &_refcount);
 #ifdef ASSERT
     if (new_value == -1) { // we have transitioned from 0 -> -1
       print();
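The symbol.cpp hunk shows where the stricter typing bites: the dispatch only accepts an integral add_value no wider than the destination and of the same signedness, and a bare -1 is an int, so it can no longer pair with a short refcount. A compile-time sketch of the rule, with C++11 static_assert standing in for HotSpot's EnableIf-based dispatch:

#include <cassert>

template<typename I, typename D>
D demo_add(I add_value, D volatile* dest) {
  static_assert(sizeof(I) <= sizeof(D), "add_value wider than destination");
  D new_value = D(*dest + add_value);
  *dest = new_value;
  return new_value;
}

int main() {
  volatile short refcount = 1;
  // demo_add(-1, &refcount);                 // rejected: I=int, D=short
  short nv = demo_add(short(-1), &refcount);  // accepted: I = D = short
  assert(nv == 0);
}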
--- old/src/share/vm/runtime/atomic.hpp 2017-08-20 02:08:33.243451131 -0400
+++ new/src/share/vm/runtime/atomic.hpp 2017-08-20 02:08:33.127445176 -0400
@@ -26,11 +26,14 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
 #include "metaprogramming/primitiveConversions.hpp"
 #include "metaprogramming/removeCV.hpp"
+#include "metaprogramming/removePointer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -82,11 +85,17 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
-  inline static jint     add    (jint     add_value, volatile jint*     dest);
-  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
-  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
+
+  template<typename I, typename D>
+  inline static D add(I add_value, D volatile* dest);
+
+  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+    return add(add_value, dest);
+  }
+
+  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
+    return add(add_value, reinterpret_cast<char* volatile*>(dest));
+  }
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
@@ -156,6 +165,74 @@
   // that is needed here.
   template<typename T> struct IsPointerConvertible;
 
+  // Dispatch handler for add.  Provides type-based validity checking
+  // and limited conversions around calls to the platform-specific
+  // implementation layer provided by PlatformAdd.
+  template<typename I, typename D, typename Enable = void>
+  struct AddImpl;
+
+  // Platform-specific implementation of add.  Support for sizes of 4
+  // bytes and (if different) pointer size bytes are required.  The
+  // class is a function object that must be default constructable,
+  // with these requirements:
+  //
+  // - dest is of type D*, an integral or pointer type.
+  // - add_value is of type I, an integral type.
+  // - sizeof(I) == sizeof(D).
+  // - if D is an integral type, I == D.
+  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
+  //
+  // Then
+  //   platform_add(add_value, dest)
+  // must be a valid expression, returning a result convertible to D.
+  //
+  // No definition is provided; all platforms must explicitly define
+  // this class and any needed specializations.
+  template<size_t byte_size> struct PlatformAdd;
+
+  // Helper base classes for defining PlatformAdd.  To use, define
+  // PlatformAdd or a specialization that derives from one of these,
+  // and include in the PlatformAdd definition the support function
+  // (described below) required by the base class.
+  //
+  // These classes implement the required function object protocol for
+  // PlatformAdd, using a support function template provided by the
+  // derived class.  Let add_value (of type I) and dest (of type D) be
+  // the arguments the object is called with.  If D is a pointer type
+  // P*, then let addend (of type I) be add_value * sizeof(P);
+  // otherwise, addend is add_value.
+  //
+  // FetchAndAdd requires the derived class to provide
+  //   fetch_and_add(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // old value.
+  //
+  // AddAndFetch requires the derived class to provide
+  //   add_and_fetch(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // new value.
+  //
+  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
+  // treat it as if it were a uintptr_t; they do not perform any
+  // scaling of the addend, as that has already been done by the
+  // caller.
+public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
+  template<typename Derived> struct FetchAndAdd;
+  template<typename Derived> struct AddAndFetch;
+private:
+
+  // Support for platforms that implement some variants of add using a
+  // (typically out of line) non-template helper function.  The
+  // generic arguments passed to PlatformAdd need to be translated to
+  // the appropriate type for the helper function, the helper function
+  // invoked on the translated arguments, and the result translated
+  // back.  Type is the parameter / return type of the helper
+  // function.  No scaling of add_value is performed when D is a pointer
+  // type, so this function can be used to implement the support function
+  // required by AddAndFetch.
+  template<typename Type, typename Fn, typename I, typename D>
+  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
@@ -219,6 +296,22 @@
   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 };
 
+// Define FetchAndAdd and AddAndFetch helper classes before including
+// platform file, which may use these as base classes, requiring they
+// be complete.
+
+template<typename Derived>
+struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
+template<typename Derived>
+struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
 // Define the class before including platform file, which may specialize
 // the operator definition.  No generic definition of specializations
 // of the operator template are provided, nor are there any generic
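The pointer-scaling contract spelled out in the comment block above means the base classes multiply the addend by sizeof(P) exactly once before handing it to the platform code, which then treats the pointer cell as raw bytes. What that buys callers, as a non-atomic illustrative sketch:

#include <cassert>
#include <stdint.h>

template<typename P, typename I>
P* demo_scaled_add(I add_value, P* volatile* dest) {
  intptr_t addend = add_value;
  addend *= (intptr_t)sizeof(P);              // scaled once, in the wrapper
  uintptr_t raw = (uintptr_t)*dest + addend;  // platform layer sees bytes
  *dest = (P*)raw;
  return (P*)raw;
}

int main() {
  double buf[4];
  double* volatile p = buf;
  assert(demo_scaled_add(3, &p) == buf + 3);  // advanced 3 elements
}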
@@ -255,8 +348,93 @@
 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 #endif
 
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+template<typename I, typename D>
+inline D Atomic::add(I add_value, D volatile* dest) {
+  return AddImpl<I, D>()(add_value, dest);
+}
+
+template<typename I, typename D>
+struct Atomic::AddImpl<
+  I, D,
+  typename EnableIf<IsIntegral<I>::value &&
+                    IsIntegral<D>::value &&
+                    (sizeof(I) <= sizeof(D)) &&
+                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D operator()(I add_value, D volatile* dest) const {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>()(addend, dest);
+  }
+};
+
+template<typename I, typename P>
+struct Atomic::AddImpl<
+  I, P*,
+  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  P* operator()(I add_value, P* volatile* dest) const {
+    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+    typedef typename Conditional<IsSigned<I>::value,
+                                 intptr_t,
+                                 uintptr_t>::type CI;
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>()(addend, dest);
+  }
+};
+
+// Most platforms do not support atomic add on a 2-byte value. However,
+// if the value occupies the most significant 16 bits of an aligned 32-bit
+// word, then we can do this with an atomic add of (add_value << 16)
+// to the 32-bit word.
+//
+// The least significant parts of this 32-bit word will never be affected, even
+// in case of overflow/underflow.
+//
+// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
+template<>
+struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
+  jshort operator()(jshort add_value, jshort volatile* dest) const {
+#ifdef VM_LITTLE_ENDIAN
+    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+#else
+    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+#endif
+    return (jshort)(new_value >> 16); // preserves sign
+  }
+};
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
+  I addend = add_value;
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    addend *= sizeof(typename RemovePointer<D>::type);
+  }
+  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
+  return old + add_value;
+}
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    add_value *= sizeof(typename RemovePointer<D>::type);
+  }
+  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
+}
+
+template<typename Type, typename Fn, typename I, typename D>
+inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+  return PrimitiveConversions::cast<D>(
+    fn(PrimitiveConversions::cast<Type>(add_value),
+       reinterpret_cast<Type volatile*>(dest)));
 }
 
 inline void Atomic::inc(volatile size_t* dest) {
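The jshort specialization above (relocated from its old home at the bottom of the file) depends on field layout: the atomic short must occupy the high half of an aligned 32-bit word, which the ATOMIC_SHORT_PAIR macro arranges. The trick in action on a little-endian machine, with std::atomic standing in for the jint-wide Atomic::add:

#include <atomic>
#include <cassert>
#include <stdint.h>

// Word layout per ATOMIC_SHORT_PAIR on little-endian: low half is the
// non-atomic neighbor (here 7), high half is the atomically-updated short.
static std::atomic<int32_t> word((int32_t(1) << 16) | 7);

int16_t add_to_high_half(int16_t add_value) {
  int32_t inc = int32_t(add_value) << 16;
  int32_t new_word = word.fetch_add(inc) + inc;
  return int16_t(new_word >> 16);  // preserves sign
}

int main() {
  assert(add_to_high_half(-1) == 0);    // 1 -> 0
  assert(add_to_high_half(-1) == -1);   // 0 -> -1; the borrow stays high
  assert((word.load() & 0xFFFF) == 7);  // low half untouched throughout
}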
@@ -413,32 +591,12 @@
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
-inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
-  // Most platforms do not support atomic add on a 2-byte value. However,
-  // if the value occupies the most significant 16 bits of an aligned 32-bit
-  // word, then we can do this with an atomic add of (add_value << 16)
-  // to the 32-bit word.
-  //
-  // The least significant parts of this 32-bit word will never be affected, even
-  // in case of overflow/underflow.
-  //
-  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
-#ifdef VM_LITTLE_ENDIAN
-  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
-#else
-  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
-#endif
-  return (jshort)(new_value >> 16); // preserves sign
-}
-
 inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(1, dest);
+  (void)add(jshort(1), dest);
 }
 
 inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(-1, dest);
+  (void)add(jshort(-1), dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
--- old/src/share/vm/services/mallocTracker.hpp 2017-08-20 02:08:33.795479444 -0400
+++ new/src/share/vm/services/mallocTracker.hpp 2017-08-20 02:08:33.679473488 -0400
@@ -53,7 +53,7 @@
   }
 
   inline void allocate(size_t sz) {
-    Atomic::add(1, &_count);
+    Atomic::inc(&_count);
     if (sz > 0) {
       Atomic::add(sz, &_size);
       DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
@@ -64,7 +64,7 @@
   inline void deallocate(size_t sz) {
     assert(_count > 0, "Nothing allocated yet");
     assert(_size >= sz, "deallocation > allocated");
-    Atomic::add(-1, &_count);
+    Atomic::dec(&_count);
     if (sz > 0) {
       // unary minus operator applied to unsigned type, result still unsigned
 #pragma warning(suppress: 4146)
@@ -74,7 +74,7 @@
 
   inline void resize(long sz) {
     if (sz != 0) {
-      Atomic::add(sz, &_size);
+      Atomic::add(size_t(sz), &_size);
       DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
     }
   }
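The mallocTracker hunks show the other accommodation pattern: mixed-sign calls like add(-1, &unsigned_count) become inc/dec, and a signed delta into an unsigned size becomes an explicit conversion, relying on well-defined unsigned wraparound. The same pattern standalone, with std::atomic in place of HotSpot's Atomic:

#include <atomic>
#include <cassert>
#include <cstddef>

class DemoCounter {
  std::atomic<size_t> _count;
  std::atomic<size_t> _size;
 public:
  DemoCounter() : _count(0), _size(0) {}
  void allocate(size_t sz)   { _count.fetch_add(1); _size.fetch_add(sz); }
  void deallocate(size_t sz) { _count.fetch_sub(1); _size.fetch_sub(sz); }
  // size_t(sz) wraps for negative sz; unsigned addition then nets the
  // intended decrease, mirroring Atomic::add(size_t(sz), &_size).
  void resize(long sz)       { _size.fetch_add(size_t(sz)); }
  size_t size() const { return _size.load(); }
};

int main() {
  DemoCounter c;
  c.allocate(100);
  c.resize(-40);
  assert(c.size() == 60);
  c.deallocate(60);
  assert(c.size() == 0);
}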