--- old/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2017-07-14 18:05:30.682233039 +0200 +++ new/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2017-07-14 18:05:30.526233044 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -433,7 +433,7 @@ //---------------------------------------------------------------------------------------------------- - // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) + // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) // // xchg exists as far back as 8086, lock needed for MP only // Stack layout immediately after call: --- old/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2017-07-14 18:05:31.510233010 +0200 +++ new/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2017-07-14 18:05:31.366233015 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -547,7 +547,7 @@ return start; } - // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) + // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) // // Arguments : // c_rarg0: exchange_value @@ -566,7 +566,7 @@ return start; } - // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) + // Support for intptr_t Atomic::specialized_xchg(intptr_t exchange_value, volatile intptr_t* dest) // // Arguments : // c_rarg0: exchange_value @@ -585,8 +585,8 @@ return start; } - // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, - // jint compare_value) + // Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, + // int32_t compare_value) // // Arguments : // c_rarg0: exchange_value @@ -611,8 +611,8 @@ return start; } - // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest, - // jbyte compare_value) + // Support for int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, + // int8_t compare_value) // // Arguments : // c_rarg0: exchange_value @@ -637,9 +637,9 @@ return start; } - // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value, - // volatile jlong* dest, - // jlong compare_value) + // Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, + // volatile int64_t* dest, + // int64_t compare_value) // Arguments : // c_rarg0: exchange_value // c_rarg1: dest @@ -663,7 +663,7 @@ return start; } - // Support for jint atomic::add(jint add_value, volatile jint* dest) + // Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) // // Arguments : // c_rarg0: add_value @@ -685,7 +685,7 @@ return start; } - // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) + // Support for intptr_t Atomic::specialized_add(intptr_t add_value, volatile intptr_t* dest) // // Arguments : // c_rarg0: add_value --- old/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-07-14 18:05:32.318232982 +0200 +++ new/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp 2017-07-14 
18:05:32.166232987 +0200 @@ -32,22 +32,6 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - // // machine barrier instructions: // @@ -93,8 +77,8 @@ #define strasm_nobarrier "" #define strasm_nobarrier_clobber_memory "" -inline jint Atomic::add (jint add_value, volatile jint* dest) { - +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { unsigned int result; __asm__ __volatile__ ( @@ -108,12 +92,12 @@ : /*%1*/"r" (add_value), /*%2*/"r" (dest) : "cc", "memory" ); - return (jint) result; + return (int32_t) result; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { long result; __asm__ __volatile__ ( @@ -127,16 +111,11 @@ : /*%1*/"r" (add_value), /*%2*/"r" (dest) : "cc", "memory" ); - return (intptr_t) result; + return (int64_t) result; } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); -} - - -inline void Atomic::inc (volatile jint* dest) { - +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { unsigned int temp; __asm__ __volatile__ ( @@ -152,8 +131,8 @@ } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - +template <> +inline void Atomic::specialized_inc(volatile int64_t* dest) { long temp; __asm__ __volatile__ ( @@ -169,13 +148,8 @@ } -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} - - -inline void Atomic::dec (volatile jint* dest) { - +template <> +inline void Atomic::specialized_dec(volatile int32_t* dest) { unsigned int temp; __asm__ __volatile__ ( @@ -191,8 +165,8 @@ } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - +template <> +inline void Atomic::specialized_dec(volatile int64_t* dest) { long temp; __asm__ __volatile__ ( @@ -208,12 +182,8 @@ } -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} - -inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { - +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { // Note that xchg_ptr doesn't necessarily do an acquire // (see synchronizer.cpp). 
@@ -245,11 +215,11 @@ "memory" ); - return (jint) old_value; + return (int32_t) old_value; } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { // Note that xchg_ptr doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -281,11 +251,7 @@ "memory" ); - return (intptr_t) old_value; -} - -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); + return (int64_t) old_value; } inline void cmpxchg_pre_membar(cmpxchg_memory_order order) { @@ -307,8 +273,8 @@ } #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { - +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -368,11 +334,11 @@ cmpxchg_post_membar(order); - return (jbyte)(unsigned char)old_value; + return (int8_t)(unsigned char)old_value; } -inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { - +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -412,11 +378,11 @@ cmpxchg_post_membar(order); - return (jint) old_value; + return (int32_t) old_value; } -inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). 
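For context, not part of this changeset: the template <> Atomic::specialized_* definitions introduced across the platform headers above are explicit specializations picked up by a shared, templated front-end in atomic.hpp, which is outside this section. Below is a minimal self-contained sketch of that dispatch shape; the AtomicSketch name, the front-end signature, and the use of a GCC builtin are assumptions of the sketch, and only the specialized_add name is taken from the patch.

    #include <stdint.h>

    struct AtomicSketch {
      // Shared front-end: callers keep using one generic entry point ...
      template <typename T>
      static T add(T add_value, volatile T* dest) {
        return specialized_add(add_value, dest);   // ... which forwards here.
      }
      // The primary template is only declared; each platform header supplies
      // the widths it supports as explicit specializations (as in this patch).
      template <typename T>
      static T specialized_add(T add_value, volatile T* dest);
    };

    // One platform-side contribution, using a GCC builtin for the sketch:
    template <>
    inline int32_t AtomicSketch::specialized_add(int32_t add_value, volatile int32_t* dest) {
      return __sync_add_and_fetch(dest, add_value);
    }

    int main() {
      volatile int32_t counter = 0;
      // The call below deduces T = int32_t and selects the specialization above.
      return AtomicSketch::add((int32_t)1, &counter) == 1 ? 0 : 1;
    }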
@@ -456,15 +422,7 @@ cmpxchg_post_membar(order); - return (jlong) old_value; -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); + return (int64_t) old_value; } #undef strasm_sync --- old/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-07-14 18:05:33.134232953 +0200 +++ new/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp 2017-07-14 18:05:32.982232959 +0200 @@ -29,21 +29,9 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - - -inline jint Atomic::add (jint add_value, volatile jint* dest) { - jint addend = add_value; +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { + int32_t addend = add_value; __asm__ volatile ( "lock xaddl %0,(%2)" : "=r" (addend) : "0" (addend), "r" (dest) @@ -51,25 +39,20 @@ return addend + add_value; } -inline void Atomic::inc (volatile jint* dest) { +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { __asm__ volatile ( "lock addl $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} - -inline void Atomic::dec (volatile jint* dest) { +template <> +inline void Atomic::specialized_dec(volatile int32_t* dest) { __asm__ volatile ( "lock subl $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { __asm__ volatile ( "xchgl (%2),%0" : "=r" (exchange_value) : "0" (exchange_value), "r" (dest) @@ -77,21 +60,19 @@ return exchange_value; } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { - __asm__ volatile ( "lock cmpxchgb %1,(%3)" +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order 
order) { + __asm__ volatile ("lock cmpxchgb %1,(%3)" : "=a" (exchange_value) : "q" (exchange_value), "a" (compare_value), "r" (dest) : "cc", "memory"); return exchange_value; } -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { - __asm__ volatile ( "lock cmpxchgl %1,(%3)" +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { + __asm__ volatile ("lock cmpxchgl %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) : "cc", "memory"); @@ -99,37 +80,35 @@ } #ifdef AMD64 -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - intptr_t addend = add_value; - __asm__ __volatile__ ( "lock xaddq %0,(%2)" +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { + int64_t addend = add_value; + __asm__ __volatile__ ("lock xaddq %0,(%2)" : "=r" (addend) : "0" (addend), "r" (dest) : "cc", "memory"); return addend + add_value; } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); -} - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - __asm__ __volatile__ ( "lock addq $1,(%0)" +template <> +inline void Atomic::specialized_inc(volatile int64_t* dest) { + __asm__ __volatile__ ("lock addq $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - __asm__ __volatile__ ( "lock subq $1,(%0)" +template <> +inline void Atomic::specialized_dec(volatile int64_t* dest) { + __asm__ __volatile__ ("lock subq $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { __asm__ __volatile__ ("xchgq (%2),%0" : "=r" (exchange_value) : "0" (exchange_value), "r" (dest) @@ -137,77 +116,38 @@ return exchange_value; } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - __asm__ __volatile__ ( "lock cmpxchgq %1,(%3)" +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { + __asm__ __volatile__ ("lock cmpxchgq %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) : "cc", "memory"); return exchange_value; } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - #else // !AMD64 -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest); -} - -inline void* Atomic::add_ptr(intptr_t add_value, 
volatile void* dest) { - return (void*)Atomic::add((jint)add_value, (volatile jint*)dest); -} - - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - inc((volatile jint*)dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - dec((volatile jint*)dest); -} - -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); -} - extern "C" { // defined in bsd_x86.s - jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool); - void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst); -} - -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP()); + int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t); + void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst); } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); +template <> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + _Atomic_move_long(&store_value, dest); } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -inline jlong Atomic::load(const volatile jlong* src) { +template <> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { volatile jlong dest; _Atomic_move_long(src, &dest); return dest; } -inline void Atomic::store(jlong store_value, jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest); -} - -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, dest); +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { + return _Atomic_cmpxchg_long(exchange_value, dest, compare_value); } #endif // AMD64 --- old/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-07-14 18:05:33.858232928 +0200 +++ new/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp 2017-07-14 18:05:33.714232933 +0200 @@ -159,21 +159,18 @@ } #endif // ARM -inline void Atomic::store(jint store_value, volatile jint* dest) { -#if !defined(ARM) && !defined(M68K) - __sync_synchronize(); -#endif - *dest = store_value; -} +#ifdef _LP64 -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { +template<> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { #if !defined(ARM) && !defined(M68K) __sync_synchronize(); #endif *dest = store_value; } -inline jint Atomic::add(jint add_value, volatile jint* dest) { +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { #ifdef ARM return arm_add_and_fetch(dest, add_value); #else @@ -185,47 +182,61 @@ #endif // ARM } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { #ifdef ARM - return arm_add_and_fetch(dest, add_value); + return arm_lock_test_and_set(dest, exchange_value); #else #ifdef M68K - return 
m68k_add_and_fetch(dest, add_value); + return m68k_lock_test_and_set(dest, exchange_value); #else - return __sync_add_and_fetch(dest, add_value); + intptr_t result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void *) add_ptr(add_value, (volatile intptr_t *) dest); -} +#else // _LP64 -inline void Atomic::inc(volatile jint* dest) { - add(1, dest); +template<> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { + volatile int64_t dest; + os::atomic_copy64(src, &dest); + return dest; } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - add_ptr(1, dest); +template<> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + os::atomic_copy64((volatile int64_t*)&store_value, dest); } -inline void Atomic::inc_ptr(volatile void* dest) { - add_ptr(1, dest); -} +#endif // _LP64 -inline void Atomic::dec(volatile jint* dest) { - add(-1, dest); +template<> +inline void Atomic::specialized_store(int32_t store_value, volatile int32_t* dest) { +#if !defined(ARM) && !defined(M68K) + __sync_synchronize(); +#endif + *dest = store_value; } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - add_ptr(-1, dest); +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { +#ifdef ARM + return arm_add_and_fetch(dest, add_value); +#else +#ifdef M68K + return m68k_add_and_fetch(dest, add_value); +#else + return __sync_add_and_fetch(dest, add_value); +#endif // M68K +#endif // ARM } -inline void Atomic::dec_ptr(volatile void* dest) { - add_ptr(-1, dest); -} -inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { #ifdef ARM return arm_lock_test_and_set(dest, exchange_value); #else @@ -236,7 +247,7 @@ // operation. Note that some platforms only support this with the // limitation that the only valid value to store is the immediate // constant 1. There is a test for this in JNI_CreateJavaVM(). - jint result = __sync_lock_test_and_set (dest, exchange_value); + int32_t result = __sync_lock_test_and_set (dest, exchange_value); // All atomic operations are expected to be full memory barriers // (see atomic.hpp). However, __sync_lock_test_and_set is not // a full memory barrier, but an acquire barrier. 
Hence, this added @@ -247,30 +258,9 @@ #endif // ARM } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, - volatile intptr_t* dest) { -#ifdef ARM - return arm_lock_test_and_set(dest, exchange_value); -#else -#ifdef M68K - return m68k_lock_test_and_set(dest, exchange_value); -#else - intptr_t result = __sync_lock_test_and_set (dest, exchange_value); - __sync_synchronize(); - return result; -#endif // M68K -#endif // ARM -} -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void *) xchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest); -} - -inline jint Atomic::cmpxchg(jint exchange_value, - volatile jint* dest, - jint compare_value, - cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { #ifdef ARM return arm_compare_and_swap(dest, compare_value, exchange_value); #else @@ -282,52 +272,9 @@ #endif // ARM } -inline jlong Atomic::cmpxchg(jlong exchange_value, - volatile jlong* dest, - jlong compare_value, - cmpxchg_memory_order order) { - +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return __sync_val_compare_and_swap(dest, compare_value, exchange_value); } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, - volatile intptr_t* dest, - intptr_t compare_value, - cmpxchg_memory_order order) { -#ifdef ARM - return arm_compare_and_swap(dest, compare_value, exchange_value); -#else -#ifdef M68K - return m68k_compare_and_swap(dest, compare_value, exchange_value); -#else - return __sync_val_compare_and_swap(dest, compare_value, exchange_value); -#endif // M68K -#endif // ARM -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, - volatile void* dest, - void* compare_value, - cmpxchg_memory_order order) { - - return (void *) cmpxchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest, - (intptr_t) compare_value, - order); -} - -inline jlong Atomic::load(const volatile jlong* src) { - volatile jlong dest; - os::atomic_copy64(src, &dest); - return dest; -} - -inline void Atomic::store(jlong store_value, jlong* dest) { - os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest); -} - -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - os::atomic_copy64((volatile jlong*)&store_value, dest); -} - #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP --- old/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-07-14 18:05:34.666232900 +0200 +++ new/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp 2017-07-14 18:05:34.514232905 +0200 @@ -34,55 +34,31 @@ #define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, 
volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline jint Atomic::add(jint add_value, volatile jint* dest) -{ +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { return __sync_add_and_fetch(dest, add_value); } -inline void Atomic::inc(volatile jint* dest) -{ - add(1, dest); -} -inline void Atomic::inc_ptr(volatile void* dest) -{ - add_ptr(1, dest); -} - -inline void Atomic::dec (volatile jint* dest) -{ - add(-1, dest); +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { + return __sync_add_and_fetch(dest, add_value); } -inline void Atomic::dec_ptr(volatile void* dest) -{ - add_ptr(-1, dest); -} -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) -{ - jint res = __sync_lock_test_and_set (dest, exchange_value); +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { + int32_t res = __sync_lock_test_and_set (dest, exchange_value); FULL_MEM_BARRIER; return res; } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) -{ - return (void *) xchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest); +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { + int64_t res = __sync_lock_test_and_set (dest, exchange_value); + FULL_MEM_BARRIER; + return res; } template<typename T> T generic_cmpxchg(T exchange_value, volatile T* dest, @@ -99,64 +75,21 @@ } #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) -{ +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { return generic_cmpxchg(exchange_value, dest, compare_value, order); } -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) -{ +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { return generic_cmpxchg(exchange_value, dest, compare_value, order); } -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) -{ - return __sync_add_and_fetch(dest, add_value); -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) -{ - return (void *) add_ptr(add_value, (volatile intptr_t *) dest); -} - -inline void Atomic::inc_ptr(volatile intptr_t* dest) -{ - add_ptr(1, dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t* dest) -{ - add_ptr(-1, dest); -} - -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) -{ - intptr_t res = __sync_lock_test_and_set (dest, exchange_value); - FULL_MEM_BARRIER; - return res; -} - -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) -{ - return generic_cmpxchg(exchange_value, dest, compare_value, order); -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t
compare_value, cmpxchg_memory_order order) -{ +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return generic_cmpxchg(exchange_value, dest, compare_value, order); } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) -{ - return (void *) cmpxchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest, - (intptr_t) compare_value, - order); -} - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } #endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP --- old/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-07-14 18:05:35.438232873 +0200 +++ new/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp 2017-07-14 18:05:35.290232878 +0200 @@ -44,40 +44,26 @@ * kernel source or kernel_user_helpers.txt in Linux Doc. */ -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline jlong Atomic::load (const volatile jlong* src) { - assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned"); +template <> +inline void Atomic::specialized_store(int64_t value, volatile int64_t* dest) { + assert(((intx)dest & (sizeof(int64_t)-1)) == 0, "Atomic 64 bit store mis-aligned"); #ifdef AARCH64 - return *src; + *dest = value; #else - return (*os::atomic_load_long_func)(src); + (*os::atomic_store_long_func)(value, dest); #endif } -inline void Atomic::store (jlong value, volatile jlong* dest) { - assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned"); +template <> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { + assert(((intx)src & (sizeof(int64_t)-1)) == 0, "Atomic 64 bit load mis-aligned"); #ifdef AARCH64 - *dest = value; + return *src; #else - (*os::atomic_store_long_func)(value, dest); + return (*os::atomic_load_long_func)(src); #endif } -inline void Atomic::store (jlong value, jlong* dest) { - store(value, (volatile jlong*)dest); -} - // As per atomic.hpp all read-modify-write operations have to provide two-way // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and // store-release-with-reservation. While load-acquire combined with store-release @@ -91,9 +77,10 @@ // // For ARMv7 we add explicit barriers in the stubs. 
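For context, not part of this changeset: the comment above requires full two-way barrier semantics from every read-modify-write operation, but __sync_lock_test_and_set (used by the bsd_zero and linux_aarch64 ports in this patch) only provides an acquire barrier, so those ports append a full fence afterwards. A self-contained sketch of that shape, with FULL_MEM_BARRIER spelled as the underlying builtin:

    #include <stdint.h>

    static int32_t full_barrier_xchg(int32_t exchange_value, volatile int32_t* dest) {
      int32_t old = __sync_lock_test_and_set(dest, exchange_value);  // acquire barrier only
      __sync_synchronize();                                          // upgrade to a full two-way fence
      return old;
    }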
-inline jint Atomic::add(jint add_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { #ifdef AARCH64 - jint val; + int32_t val; int tmp; __asm__ volatile( "1:\n\t" @@ -110,57 +97,10 @@ #endif } -inline void Atomic::inc(volatile jint* dest) { - Atomic::add(1, (volatile jint *)dest); -} - -inline void Atomic::dec(volatile jint* dest) { - Atomic::add(-1, (volatile jint *)dest); -} - -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { -#ifdef AARCH64 - intptr_t val; - int tmp; - __asm__ volatile( - "1:\n\t" - " ldaxr %[val], [%[dest]]\n\t" - " add %[val], %[val], %[add_val]\n\t" - " stlxr %w[tmp], %[val], [%[dest]]\n\t" - " cbnz %w[tmp], 1b\n\t" - : [val] "=&r" (val), [tmp] "=&r" (tmp) - : [add_val] "r" (add_value), [dest] "r" (dest) - : "memory"); - return val; -#else - return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest); -#endif -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); -} - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - Atomic::add_ptr(1, dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - Atomic::add_ptr(-1, dest); -} - -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} - -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} - - -inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { #ifdef AARCH64 - jint old_val; + int32_t old_val; int tmp; __asm__ volatile( "1:\n\t" @@ -176,33 +116,12 @@ #endif } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { -#ifdef AARCH64 - intptr_t old_val; - int tmp; - __asm__ volatile( - "1:\n\t" - " ldaxr %[old_val], [%[dest]]\n\t" - " stlxr %w[tmp], %[new_val], [%[dest]]\n\t" - " cbnz %w[tmp], 1b\n\t" - : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp) - : [new_val] "r" (exchange_value), [dest] "r" (dest) - : "memory"); - return old_val; -#else - return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); -#endif -} - -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering -inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { #ifdef AARCH64 - jint rv; + int32_t rv; int tmp; __asm__ volatile( "1:\n\t" @@ -225,7 +144,8 @@ #endif } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { #ifdef AARCH64 jlong rv; int tmp; @@ -245,21 +165,43 @@ : "memory"); return rv; #else - assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!"); + assert(VM_Version::supports_cx8(), "64 bit atomic compare and exchange not supported on this architecture!"); return (*os::atomic_cmpxchg_long_func)(compare_value, 
exchange_value, dest); #endif } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { #ifdef AARCH64 - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -#else - return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -#endif +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { + intptr_t val; + int tmp; + __asm__ volatile( + "1:\n\t" + " ldaxr %[val], [%[dest]]\n\t" + " add %[val], %[val], %[add_val]\n\t" + " stlxr %w[tmp], %[val], [%[dest]]\n\t" + " cbnz %w[tmp], 1b\n\t" + : [val] "=&r" (val), [tmp] "=&r" (tmp) + : [add_val] "r" (add_value), [dest] "r" (dest) + : "memory"); + return val; } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order); +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { + intptr_t old_val; + int tmp; + __asm__ volatile( + "1:\n\t" + " ldaxr %[old_val], [%[dest]]\n\t" + " stlxr %w[tmp], %[new_val], [%[dest]]\n\t" + " cbnz %w[tmp], 1b\n\t" + : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp) + : [new_val] "r" (exchange_value), [dest] "r" (dest) + : "memory"); + return old_val; } +#endif + #endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP --- old/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-07-14 18:05:36.102232850 +0200 +++ new/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp 2017-07-14 18:05:35.950232855 +0200 @@ -32,22 +32,6 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - // // machine barrier instructions: // @@ -93,8 +77,8 @@ #define strasm_nobarrier "" #define strasm_nobarrier_clobber_memory "" -inline jint Atomic::add (jint add_value, volatile jint* dest) { - +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { unsigned int result; __asm__ __volatile__ ( @@ -108,12 +92,12 @@ : /*%1*/"r" (add_value), /*%2*/"r" (dest) : "cc", "memory" ); - return (jint) result; + return (int32_t) result; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - +template <> +inline int64_t 
Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { long result; __asm__ __volatile__ ( @@ -127,16 +111,12 @@ : /*%1*/"r" (add_value), /*%2*/"r" (dest) : "cc", "memory" ); - return (intptr_t) result; -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); + return (int64_t) result; } -inline void Atomic::inc (volatile jint* dest) { - +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { unsigned int temp; __asm__ __volatile__ ( @@ -152,8 +132,8 @@ } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - +template <> +inline void Atomic::specialized_inc(volatile int64_t* dest) { long temp; __asm__ __volatile__ ( @@ -169,13 +149,9 @@ } -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} - - -inline void Atomic::dec (volatile jint* dest) { +template <> +inline void Atomic::specialized_dec(volatile int32_t* dest) { unsigned int temp; __asm__ __volatile__ ( @@ -191,8 +167,9 @@ } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { +template <> +inline void Atomic::specialized_dec(volatile int64_t* dest) { long temp; __asm__ __volatile__ ( @@ -208,12 +185,9 @@ } -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} - -inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { // Note that xchg_ptr doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -245,11 +219,12 @@ "memory" ); - return (jint) old_value; + return (int32_t) old_value; } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { // Note that xchg_ptr doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -281,12 +256,9 @@ "memory" ); - return (intptr_t) old_value; + return (int64_t) old_value; } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} inline void cmpxchg_pre_membar(cmpxchg_memory_order order) { if (order != memory_order_relaxed) { @@ -307,8 +279,8 @@ } #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { - +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -368,11 +340,11 @@ cmpxchg_post_membar(order); - return (jbyte)(unsigned char)old_value; + return (int8_t)(unsigned char)old_value; } -inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { - +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). 
@@ -412,11 +384,12 @@ cmpxchg_post_membar(order); - return (jint) old_value; + return (int32_t) old_value; } -inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not // specified otherwise (see atomic.hpp). @@ -456,16 +429,9 @@ cmpxchg_post_membar(order); - return (jlong) old_value; -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); + return (int64_t) old_value; } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} #undef strasm_sync #undef strasm_lwsync --- old/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-07-14 18:05:36.854232824 +0200 +++ new/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp 2017-07-14 18:05:36.706232829 +0200 @@ -53,19 +53,6 @@ // is an integer multiple of the data length. Furthermore, all stores are ordered: // a store which occurs conceptually before another store becomes visible to other CPUs // before the other store becomes visible. -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } //------------ @@ -82,7 +69,8 @@ // The return value of the method is the value that was successfully stored. At the // time the caller receives back control, the value in memory may have changed already. 
-inline jint Atomic::add(jint inc, volatile jint*dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t inc, volatile int32_t* dest) { unsigned int old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -124,11 +112,12 @@ ); } - return (jint)upd; + return (int32_t)upd; } -inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_add(int64_t inc, volatile int64_t* dest) { unsigned long old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -170,15 +159,11 @@ ); } - return (intptr_t)upd; + return (int64_t)upd; } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); -} - - //------------ + // Atomic::inc //------------ // These methods force the value in memory to be incremented (augmented by 1). @@ -189,7 +174,8 @@ // The value in memory is updated by using a compare-and-swap instruction. The // instruction is retried as often as required. -inline void Atomic::inc(volatile jint* dest) { +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { unsigned int old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -234,7 +220,8 @@ } } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { +template <> +inline void Atomic::specialized_inc(volatile int64_t* dest) { unsigned long old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -278,9 +265,6 @@ } } -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} //------------ // Atomic::dec @@ -293,7 +277,8 @@ // The value in memory is updated by using a compare-and-swap instruction. The // instruction is retried as often as required. -inline void Atomic::dec(volatile jint* dest) { +template <> +inline void Atomic::specialized_dec(volatile int32_t* dest) { unsigned int old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -340,7 +325,8 @@ } } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { +template <> +inline void Atomic::specialized_dec(volatile int64_t* dest) { unsigned long old, upd; if (VM_Version::has_LoadAndALUAtomicV1()) { @@ -387,9 +373,6 @@ } } -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} //------------- // Atomic::xchg @@ -407,7 +390,8 @@ // // The return value is the (unchanged) value from memory as it was when the // replacement succeeded. -inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t xchg_val, volatile int32_t* dest) { unsigned int old; __asm__ __volatile__ ( @@ -423,10 +407,11 @@ : "cc" ); - return (jint)old; + return (int32_t)old; } -inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t xchg_val, volatile int64_t* dest) { unsigned long old; __asm__ __volatile__ ( @@ -445,9 +430,6 @@ return (intptr_t)old; } -inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} //---------------- // Atomic::cmpxchg @@ -478,7 +460,8 @@ // function is performed before the operand is fetched and again after the // operation is completed." 
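For context, not part of this changeset: the description above ("updated by using a compare-and-swap instruction. The instruction is retried as often as required") is also the pattern callers build on top of these cmpxchg primitives. A self-contained sketch of such a retry loop, written against the GCC builtin rather than Atomic::cmpxchg so it stands alone; the atomic_bounded_max name is hypothetical:

    #include <stdint.h>

    // Raise *dest to at least 'bound'; returns the last value observed before
    // the (possibly skipped) update.
    static int32_t atomic_bounded_max(int32_t bound, volatile int32_t* dest) {
      int32_t old = *dest;
      while (old < bound) {
        // Returns the value found in memory; equal to 'old' iff the swap happened.
        int32_t prev = __sync_val_compare_and_swap(dest, old, bound);
        if (prev == old) break;   // swap succeeded
        old = prev;               // lost the race; retry against the fresh value
      }
      return old;
    }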
-jint Atomic::cmpxchg(jint xchg_val, volatile jint* dest, jint cmp_val, cmpxchg_memory_order unused) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t xchg_val, volatile int32_t* dest, int32_t cmp_val, cmpxchg_memory_order order) { unsigned long old; __asm__ __volatile__ ( @@ -493,10 +476,11 @@ : "cc" ); - return (jint)old; + return (int32_t)old; } -jlong Atomic::cmpxchg(jlong xchg_val, volatile jlong* dest, jlong cmp_val, cmpxchg_memory_order unused) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t xchg_val, volatile int64_t* dest, int64_t cmp_val, cmpxchg_memory_order order) { unsigned long old; __asm__ __volatile__ ( @@ -511,17 +495,7 @@ : "cc" ); - return (jlong)old; -} - -void* Atomic::cmpxchg_ptr(void *xchg_val, volatile void* dest, void* cmp_val, cmpxchg_memory_order unused) { - return (void*)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused); + return (int64_t)old; } -intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_t cmp_val, cmpxchg_memory_order unused) { - return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused); -} - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP --- old/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-07-14 18:05:37.538232800 +0200 +++ new/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp 2017-07-14 18:05:37.422232804 +0200 @@ -27,31 +27,9 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline void Atomic::inc (volatile jint* dest) { (void)add (1, dest); } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); } -inline void Atomic::inc_ptr(volatile void* dest) { (void)add_ptr(1, dest); } - -inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } -inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - -inline jint Atomic::add (jint add_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { intptr_t rv; __asm__ volatile( "1: \n\t" @@ -68,7 +46,9 @@ return rv; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { + +template <> +inline int64_t 
Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { intptr_t rv; __asm__ volatile( "1: \n\t" @@ -85,12 +65,9 @@ return rv; } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest); -} - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { intptr_t rv = exchange_value; __asm__ volatile( " swap [%2],%1\n\t" @@ -100,7 +77,9 @@ return rv; } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { + +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { intptr_t rv = exchange_value; __asm__ volatile( "1:\n\t" @@ -117,13 +96,10 @@ return rv; } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { - jint rv; +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { + int32_t rv; __asm__ volatile( " cas [%2], %3, %0" : "=r" (rv) @@ -132,28 +108,16 @@ return rv; } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - jlong rv; - __asm__ volatile( - " casx [%2], %3, %0" - : "=r" (rv) - : "0" (exchange_value), "r" (dest), "r" (compare_value) - : "memory"); - return rv; -} -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - intptr_t rv; +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { + int64_t rv; __asm__ volatile( - " casx [%2], %3, %0" + " casx [%2], %3, %0" : "=r" (rv) : "0" (exchange_value), "r" (dest), "r" (compare_value) : "memory"); return rv; } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order); -} - #endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP --- old/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp 2017-07-14 18:05:38.266232775 +0200 +++ new/src/os_cpu/linux_sparc/vm/os_linux_sparc.hpp 2017-07-14 18:05:38.114232780 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,15 +28,15 @@ // // NOTE: we are back in class os here, not Linux // - static jint (*atomic_xchg_func) (jint, volatile jint*); - static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); - static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); - static jint (*atomic_add_func) (jint, volatile jint*); + static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*); + static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t); + static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t); + static int32_t (*atomic_add_func) (int32_t, volatile int32_t*); - static jint atomic_xchg_bootstrap (jint, volatile jint*); - static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); - static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); - static jint atomic_add_bootstrap (jint, volatile jint*); + static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*); + static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t); + static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t); + static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*); static void setup_fpu() {} --- old/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-07-14 18:05:39.018232748 +0200 +++ new/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp 2017-07-14 18:05:38.862232754 +0200 @@ -29,21 +29,9 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - - -inline jint Atomic::add (jint add_value, volatile jint* dest) { - jint addend = add_value; +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { + int32_t addend = add_value; __asm__ volatile ( "lock xaddl %0,(%2)" : "=r" (addend) : "0" (addend), "r" (dest) @@ -51,25 +39,20 @@ return addend + add_value; } -inline void Atomic::inc (volatile jint* dest) { +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { __asm__ volatile ( "lock addl $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::inc_ptr(volatile void* dest) { - inc_ptr((volatile intptr_t*)dest); -} - -inline void Atomic::dec (volatile jint* dest) { +template <> +inline void Atomic::specialized_dec(volatile int32_t* dest) { __asm__ volatile ( "lock subl $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::dec_ptr(volatile void* dest) { - dec_ptr((volatile intptr_t*)dest); -} - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t 
exchange_value, volatile int32_t* dest) { __asm__ volatile ( "xchgl (%2),%0" : "=r" (exchange_value) : "0" (exchange_value), "r" (dest) @@ -77,12 +60,9 @@ return exchange_value; } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { __asm__ volatile ("lock cmpxchgb %1,(%3)" : "=a" (exchange_value) : "q" (exchange_value), "a" (compare_value), "r" (dest) @@ -90,7 +70,8 @@ return exchange_value; } -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { __asm__ volatile ("lock cmpxchgl %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -99,11 +80,10 @@ } #ifdef AMD64 -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - intptr_t addend = add_value; +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { + int64_t addend = add_value; __asm__ __volatile__ ("lock xaddq %0,(%2)" : "=r" (addend) : "0" (addend), "r" (dest) @@ -111,25 +91,24 @@ return addend + add_value; } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr(add_value, (volatile intptr_t*)dest); -} - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { +template <> +inline void Atomic::specialized_inc(volatile int64_t* dest) { __asm__ __volatile__ ("lock addq $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { +template <> +inline void Atomic::specialized_dec(volatile int64_t* dest) { __asm__ __volatile__ ("lock subq $1,(%0)" : : "r" (dest) : "cc", "memory"); } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { __asm__ __volatile__ ("xchgq (%2),%0" : "=r" (exchange_value) : "0" (exchange_value), "r" (dest) @@ -137,7 +116,8 @@ return exchange_value; } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { __asm__ __volatile__ ("lock cmpxchgq %1,(%3)" : "=a" (exchange_value) : "r" (exchange_value), "a" (compare_value), "r" (dest) @@ -145,69 +125,29 @@ return exchange_value; } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return 
(void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - #else // !AMD64 -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest); -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)Atomic::add((jint)add_value, (volatile jint*)dest); -} - - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - inc((volatile jint*)dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - dec((volatile jint*)dest); -} - -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); -} - extern "C" { // defined in linux_x86.s - jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong); - void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst); + int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t); + void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst); } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - return _Atomic_cmpxchg_long(exchange_value, dest, compare_value); -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); +template <> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + _Atomic_move_long(&store_value, dest); } -inline jlong Atomic::load(const volatile jlong* src) { +template <> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { volatile jlong dest; _Atomic_move_long(src, &dest); return dest; } -inline void Atomic::store(jlong store_value, jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest); -} - -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, dest); +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { + return _Atomic_cmpxchg_long(exchange_value, dest, compare_value); } #endif // AMD64 --- old/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-07-14 18:05:39.682232725 +0200 +++ new/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp 2017-07-14 18:05:39.570232729 +0200 @@ -159,15 +159,10 @@ } #endif // ARM -inline void Atomic::store(jint store_value, volatile jint* dest) { - *dest = store_value; -} - -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { - *dest = store_value; -} +#ifdef _LP64 -inline jint Atomic::add(jint add_value, volatile jint* dest) { +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { #ifdef ARM return arm_add_and_fetch(dest, add_value); #else @@ -179,47 +174,40 @@ #endif // ARM } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { + +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* 
dest) { #ifdef ARM - return arm_add_and_fetch(dest, add_value); + return arm_lock_test_and_set(dest, exchange_value); #else #ifdef M68K - return m68k_add_and_fetch(dest, add_value); + return m68k_lock_test_and_set(dest, exchange_value); #else - return __sync_add_and_fetch(dest, add_value); + intptr_t result = __sync_lock_test_and_set (dest, exchange_value); + __sync_synchronize(); + return result; #endif // M68K #endif // ARM } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void *) add_ptr(add_value, (volatile intptr_t *) dest); -} - -inline void Atomic::inc(volatile jint* dest) { - add(1, dest); -} +#endif // _LP64 -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - add_ptr(1, dest); -} - -inline void Atomic::inc_ptr(volatile void* dest) { - add_ptr(1, dest); -} - -inline void Atomic::dec(volatile jint* dest) { - add(-1, dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - add_ptr(-1, dest); +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { +#ifdef ARM + return arm_add_and_fetch(dest, add_value); +#else +#ifdef M68K + return m68k_add_and_fetch(dest, add_value); +#else + return __sync_add_and_fetch(dest, add_value); +#endif // M68K +#endif // ARM } -inline void Atomic::dec_ptr(volatile void* dest) { - add_ptr(-1, dest); -} -inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { #ifdef ARM return arm_lock_test_and_set(dest, exchange_value); #else @@ -230,7 +218,7 @@ // operation. Note that some platforms only support this with the // limitation that the only valid value to store is the immediate // constant 1. There is a test for this in JNI_CreateJavaVM(). - jint result = __sync_lock_test_and_set (dest, exchange_value); + int32_t result = __sync_lock_test_and_set (dest, exchange_value); // All atomic operations are expected to be full memory barriers // (see atomic.hpp). However, __sync_lock_test_and_set is not // a full memory barrier, but an acquire barrier. 
Hence, this added @@ -241,30 +229,9 @@ #endif // ARM } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, - volatile intptr_t* dest) { -#ifdef ARM - return arm_lock_test_and_set(dest, exchange_value); -#else -#ifdef M68K - return m68k_lock_test_and_set(dest, exchange_value); -#else - intptr_t result = __sync_lock_test_and_set (dest, exchange_value); - __sync_synchronize(); - return result; -#endif // M68K -#endif // ARM -} -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void *) xchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest); -} - -inline jint Atomic::cmpxchg(jint exchange_value, - volatile jint* dest, - jint compare_value, - cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { #ifdef ARM return arm_compare_and_swap(dest, compare_value, exchange_value); #else @@ -276,52 +243,24 @@ #endif // ARM } -inline jlong Atomic::cmpxchg(jlong exchange_value, - volatile jlong* dest, - jlong compare_value, - cmpxchg_memory_order order) { - - return __sync_val_compare_and_swap(dest, compare_value, exchange_value); -} -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, - volatile intptr_t* dest, - intptr_t compare_value, - cmpxchg_memory_order order) { -#ifdef ARM - return arm_compare_and_swap(dest, compare_value, exchange_value); -#else -#ifdef M68K - return m68k_compare_and_swap(dest, compare_value, exchange_value); -#else +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return __sync_val_compare_and_swap(dest, compare_value, exchange_value); -#endif // M68K -#endif // ARM } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, - volatile void* dest, - void* compare_value, - cmpxchg_memory_order order) { - - return (void *) cmpxchg_ptr((intptr_t) exchange_value, - (volatile intptr_t*) dest, - (intptr_t) compare_value, - order); -} -inline jlong Atomic::load(const volatile jlong* src) { - volatile jlong dest; +template<> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { + volatile int64_t dest; os::atomic_copy64(src, &dest); return dest; } -inline void Atomic::store(jlong store_value, jlong* dest) { - os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest); -} -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - os::atomic_copy64((volatile jlong*)&store_value, dest); +template<> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + os::atomic_copy64((volatile int64_t*)&store_value, dest); } #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP --- old/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-07-14 18:05:40.354232702 +0200 +++ new/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp 2017-07-14 18:05:40.202232707 +0200 @@ -29,90 +29,46 @@ // Implementation of class atomic -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = 
store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline void Atomic::inc (volatile jint* dest) { (void)add (1, dest); } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); } -inline void Atomic::inc_ptr(volatile void* dest) { (void)add_ptr(1, dest); } - -inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } -inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } - - -inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - - // This is the interface to the atomic instructions in solaris_sparc.il. -// It's very messy because we need to support v8 and these instructions -// are illegal there. When sparc v8 is dropped, we can drop out lots of -// this code. Also compiler2 does not support v8 so the conditional code -// omits the instruction set check. -extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest); -extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest); +extern "C" int32_t _Atomic_swap32(int32_t exchange_value, volatile int32_t* dest); +extern "C" int64_t _Atomic_swap64(int64_t exchange_value, volatile int64_t* dest); -extern "C" jint _Atomic_cas32(jint exchange_value, volatile jint* dest, jint compare_value); -extern "C" intptr_t _Atomic_cas64(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value); -extern "C" jlong _Atomic_casl (jlong exchange_value, volatile jlong* dest, jlong compare_value); +extern "C" int32_t _Atomic_cas32(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value); +extern "C" int64_t _Atomic_cas64(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value); -extern "C" jint _Atomic_add32(jint inc, volatile jint* dest); -extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest); +extern "C" int32_t _Atomic_add32(int32_t add_value, volatile int32_t* dest); +extern "C" int64_t _Atomic_add64(int64_t add_value, volatile int64_t* dest); -inline jint Atomic::add (jint add_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { return _Atomic_add32(add_value, dest); } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { return _Atomic_add64(add_value, dest); } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest); -} - - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { return _Atomic_swap32(exchange_value, dest); } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { +template <> +inline int64_t 
Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { return _Atomic_swap64(exchange_value, dest); } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - - -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { return _Atomic_cas32(exchange_value, dest, compare_value); } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - // Return 64 bit value in %o0 - return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value); -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return _Atomic_cas64(exchange_value, dest, compare_value); } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order); -} - #endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP --- old/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp 2017-07-14 18:05:41.090232676 +0200 +++ new/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.hpp 2017-07-14 18:05:40.942232681 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,15 +28,15 @@ // // NOTE: we are back in class os here, not Solaris // - static jint (*atomic_xchg_func) (jint, volatile jint*); - static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); - static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); - static jint (*atomic_add_func) (jint, volatile jint*); + static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*); + static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t); + static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t); + static int32_t (*atomic_add_func) (int32_t, volatile int32_t*); - static jint atomic_xchg_bootstrap (jint, volatile jint*); - static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); - static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); - static jint atomic_add_bootstrap (jint, volatile jint*); + static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*); + static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t); + static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t); + static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*); static void setup_fpu() {} --- old/src/os_cpu/solaris_sparc/vm/solaris_sparc.il 2017-07-14 18:05:41.834232650 +0200 +++ new/src/os_cpu/solaris_sparc/vm/solaris_sparc.il 2017-07-14 18:05:41.674232656 +0200 @@ -32,7 +32,7 @@ .end - // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). 
+ // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest). // // Arguments: // exchange_value: O0 @@ -48,7 +48,7 @@ .end - // Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest). + // Support for int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t * dest). // // 64-bit // @@ -73,9 +73,9 @@ .end - // Support for jint Atomic::cmpxchg(jint exchange_value, - // volatile jint* dest, - // jint compare_value) + // Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, + // volatile int32_t* dest, + // int32_t compare_value) // // Arguments: // exchange_value: O0 @@ -92,9 +92,9 @@ .end - // Support for intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, - // volatile intptr_t* dest, - // intptr_t compare_value) + // Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, + // volatile int64_t* dest, + // int64_t compare_value) // // 64-bit // @@ -113,52 +113,7 @@ .end - // Support for jlong Atomic::cmpxchg(jlong exchange_value, - // volatile jlong* dest, - // jlong compare_value) - // - // 32-bit calling conventions - // - // Arguments: - // exchange_value: O1:O0 - // dest: O2 - // compare_value: O4:O3 - // - // Results: - // O1:O0: the value previously stored in dest - - .inline _Atomic_casl, 3 - .volatile - sllx %o0, 32, %o0 - srl %o1, 0, %o1 - or %o0,%o1,%o0 - sllx %o3, 32, %o3 - srl %o4, 0, %o4 - or %o3,%o4,%o3 - casx [%o2], %o3, %o0 - srl %o0, 0, %o1 - srlx %o0, 32, %o0 - .nonvolatile - .end - - // Support for jlong Atomic::load and Atomic::store on v9. - // - // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst) - // - // Arguments: - // src: O0 - // dest: O1 - // - // Overwrites O2 - - .inline _Atomic_move_long_v9,2 - .volatile - ldx [%o0], %o2 - stx %o2, [%o1] - .nonvolatile - .end - - // Support for jint Atomic::add(jint add_value, volatile jint* dest). + // Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest). 
// // Arguments: // add_value: O0 (e.g., +1 or -1) @@ -183,7 +138,7 @@ .end - // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) + // Support for int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) // // 64-bit // --- old/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-07-14 18:05:42.614232623 +0200 +++ new/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp 2017-07-14 18:05:42.462232628 +0200 @@ -27,136 +27,75 @@ #include "runtime/os.hpp" -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } - - -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - -inline void Atomic::inc (volatile jint* dest) { (void)add (1, dest); } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { (void)add_ptr(1, dest); } -inline void Atomic::inc_ptr(volatile void* dest) { (void)add_ptr(1, dest); } - -inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest); } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } -inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } - // For Sun Studio - implementation is in solaris_x86_[32/64].il. -// For gcc - implementation is just below. 
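The retained comment above points at the Sun Studio inline-assembly templates in solaris_x86_[32/64].il that back the extern "C" stubs declared next. For comparison only (this sketch is not part of the patch, and the GCC __sync builtin merely stands in for the .il stub), the same specialization written in the style of the linux_zero port earlier in this change would be:

template <>
inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  // Full-barrier compare-and-swap; returns the value previously stored in *dest.
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}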
extern "C" { - jint _Atomic_add(jint add_value, volatile jint* dest); - jint _Atomic_xchg(jint exchange_value, volatile jint* dest); - jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, - jbyte compare_value); - jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, - jint compare_value); - jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, - jlong compare_value); + int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest); + int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest); + int8_t _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest, + int8_t compare_value); + int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest, + int32_t compare_value); + int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest, + int64_t compare_value); } -inline jint Atomic::add (jint add_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { return _Atomic_add(add_value, dest); } -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { return _Atomic_xchg(exchange_value, dest); } #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value); } -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { return _Atomic_cmpxchg(exchange_value, dest, compare_value); } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return _Atomic_cmpxchg_long(exchange_value, dest, compare_value); } - #ifdef AMD64 -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest); -extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest); - -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest); -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest); -} -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest); -} - -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest); -} +extern "C" int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest); +extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest); -inline intptr_t 
Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value); +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { + return _Atomic_add_long(add_value, dest); } -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value); +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { + return _Atomic_xchg_long(exchange_value, dest); } -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - #else // !AMD64 -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - return (intptr_t)add((jint)add_value, (volatile jint*)dest); -} -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add((jint)add_value, (volatile jint*)dest); -} +extern "C" void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst); -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); -} - -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg((jint)exchange_value, (volatile jint*)dest); -} - -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -extern "C" void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst); - -inline jlong Atomic::load(const volatile jlong* src) { - volatile jlong dest; +template <> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { + volatile int64_t dest; _Atomic_move_long(src, &dest); return dest; } -inline void Atomic::store(jlong store_value, jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest); -} - -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - _Atomic_move_long((volatile jlong*)&store_value, dest); +template <> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + _Atomic_move_long((volatile int64_t*)&store_value, (volatile int64_t*)dest); } #endif // AMD64 --- old/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp 2017-07-14 18:05:43.394232596 +0200 +++ new/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp 2017-07-14 18:05:43.242232601 +0200 @@ -905,12 +905,12 @@ // until initialization is complete. // TODO - replace with .il implementation when compiler supports it. 
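The bootstrap routines below all share one shape: an os:: function pointer initially aims at a *_bootstrap function, the bootstrap switches the pointer over to the generated stub once StubRoutines publishes it, and until then it falls back to a plain, non-atomic update, which is tolerable only while the VM is still single-threaded. A condensed, self-contained sketch of that pattern (every name prefixed example_ is hypothetical and only stands in for the real stub plumbing):

#include <stddef.h>
#include <stdint.h>

typedef int32_t example_add_func_t(int32_t, volatile int32_t*);

// Hypothetical stand-in for StubRoutines::atomic_add_entry(); returns NULL until
// the assembler stubs have been generated.
static example_add_func_t* example_stub_entry() { return NULL; }

static int32_t example_add_bootstrap(int32_t add_value, volatile int32_t* dest);
static example_add_func_t* example_add_func = example_add_bootstrap;

static int32_t example_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  example_add_func_t* stub = example_stub_entry();
  if (stub != NULL) {
    example_add_func = stub;   // install the real stub; later callers bypass the bootstrap
    return (*stub)(add_value, dest);
  }
  // Still single-threaded during VM startup, so a plain read-modify-write is acceptable here.
  int32_t new_value = *dest + add_value;
  *dest = new_value;
  return new_value;
}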
-typedef jint xchg_func_t (jint, volatile jint*); -typedef jint cmpxchg_func_t (jint, volatile jint*, jint); -typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong); -typedef jint add_func_t (jint, volatile jint*); +typedef int32_t xchg_func_t (int32_t, volatile int32_t*); +typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t); +typedef int64_t cmpxchg_long_func_t(int64_t, volatile int64_t*, int64_t); +typedef int32_t add_func_t (int32_t, volatile int32_t*); -jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { +int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) { // try to use the stub: xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); @@ -920,12 +920,12 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jint old_value = *dest; + int32_t old_value = *dest; *dest = exchange_value; return old_value; } -jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) { +int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) { // try to use the stub: cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); @@ -935,13 +935,13 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jint old_value = *dest; + int32_t old_value = *dest; if (old_value == compare_value) *dest = exchange_value; return old_value; } -jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { +int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) { // try to use the stub: cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); @@ -951,13 +951,13 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jlong old_value = *dest; + int64_t old_value = *dest; if (old_value == compare_value) *dest = exchange_value; return old_value; } -jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { +int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) { // try to use the stub: add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); --- old/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp 2017-07-14 18:05:44.206232568 +0200 +++ new/src/os_cpu/solaris_x86/vm/os_solaris_x86.hpp 2017-07-14 18:05:44.050232573 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,15 +31,15 @@ #ifdef AMD64 static void setup_fpu() {} #else - static jint (*atomic_xchg_func) (jint, volatile jint*); - static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); - static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); - static jint (*atomic_add_func) (jint, volatile jint*); + static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*); + static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t); + static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t); + static int32_t (*atomic_add_func) (int32_t, volatile int32_t*); - static jint atomic_xchg_bootstrap (jint, volatile jint*); - static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); - static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); - static jint atomic_add_bootstrap (jint, volatile jint*); + static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*); + static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t); + static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t); + static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*); static void setup_fpu(); #endif // AMD64 --- old/src/os_cpu/solaris_x86/vm/solaris_x86_64.il 2017-07-14 18:05:45.010232540 +0200 +++ new/src/os_cpu/solaris_x86/vm/solaris_x86_64.il 2017-07-14 18:05:44.854232545 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,7 @@ orq %rdx, %rax .end - // Support for jint Atomic::add(jint add_value, volatile jint* dest) + // Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) .inline _Atomic_add,2 movl %edi, %eax // save add_value for return lock @@ -57,7 +57,7 @@ addl %edi, %eax .end - // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest) + // Support for int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) .inline _Atomic_add_long,2 movq %rdi, %rax // save add_value for return lock @@ -65,39 +65,39 @@ addq %rdi, %rax .end - // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). + // Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest). .inline _Atomic_xchg,2 xchgl (%rsi), %edi movl %edi, %eax .end - // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest). + // Support for int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest). 
.inline _Atomic_xchg_long,2 xchgq (%rsi), %rdi movq %rdi, %rax .end - // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, - // volatile jbyte *dest, - // jbyte compare_value) + // Support for int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, + // volatile int8_t *dest, + // int8_t compare_value) .inline _Atomic_cmpxchg_byte,3 movb %dl, %al // compare_value lock cmpxchgb %dil, (%rsi) .end - // Support for jint Atomic::cmpxchg(jint exchange_value, - // volatile jint *dest, - // jint compare_value) + // Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, + // volatile int32_t *dest, + // int32_t compare_value) .inline _Atomic_cmpxchg,3 movl %edx, %eax // compare_value lock cmpxchgl %edi, (%rsi) .end - // Support for jlong Atomic::cmpxchg(jlong exchange_value, - // volatile jlong* dest, - // jlong compare_value) + // Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, + // volatile int64_t* dest, + // int64_t compare_value) .inline _Atomic_cmpxchg_long,3 movq %rdx, %rax // compare_value lock --- old/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-07-14 18:05:45.774232513 +0200 +++ new/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp 2017-07-14 18:05:45.638232518 +0200 @@ -42,99 +42,48 @@ #pragma warning(disable: 4035) // Disables warnings reporting missing return statement -inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } - -inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } - -inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } -inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } -inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } - - -inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } -inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } - #ifdef AMD64 -inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } -inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } -inline jint Atomic::add (jint add_value, volatile jint* dest) { - return (jint)(*os::atomic_add_func)(add_value, dest); +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { + return (int32_t)(*os::atomic_add_func)(add_value, dest); } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) { return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest); } -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest); -} - -inline void Atomic::inc (volatile jint* dest) { - (void)add (1, dest); -} - -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - (void)add_ptr(1, dest); -} - -inline void Atomic::inc_ptr(volatile void* dest) { - (void)add_ptr(1, dest); -} - -inline void Atomic::dec (volatile jint* dest) { - (void)add (-1, dest); -} - -inline void Atomic::dec_ptr(volatile intptr_t*
dest) { - (void)add_ptr(-1, dest); -} - -inline void Atomic::dec_ptr(volatile void* dest) { - (void)add_ptr(-1, dest); -} - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { - return (jint)(*os::atomic_xchg_func)(exchange_value, dest); +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { + return (int32_t)(*os::atomic_xchg_func)(exchange_value, dest); } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { +template <> +inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) { return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest); } -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest); -} - -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value); } #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value); } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value); } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order); -} - -inline jlong Atomic::load(const volatile jlong* src) { return *src; } - #else // !AMD64 -inline jint Atomic::add (jint add_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) { __asm { mov edx, dest; mov eax, add_value; @@ -144,15 +93,8 @@ } } -inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { - return (intptr_t)add((jint)add_value, (volatile jint*)dest); -} - -inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { - return (void*)add((jint)add_value, (volatile jint*)dest); -} - -inline void Atomic::inc (volatile jint* dest) { +template <> +inline void Atomic::specialized_inc(volatile int32_t* dest) { // alternative for InterlockedIncrement __asm { mov edx, dest; @@ -160,15 +102,8 @@ } } -inline void Atomic::inc_ptr(volatile intptr_t* dest) { - inc((volatile jint*)dest); -} - -inline void Atomic::inc_ptr(volatile void* dest) { - inc((volatile jint*)dest); -} - -inline void Atomic::dec (volatile jint* dest) { +template <> +inline void 
Atomic::specialized_dec(volatile int32_t* dest) { // alternative for InterlockedDecrement __asm { mov edx, dest; @@ -176,15 +111,8 @@ } } -inline void Atomic::dec_ptr(volatile intptr_t* dest) { - dec((volatile jint*)dest); -} - -inline void Atomic::dec_ptr(volatile void* dest) { - dec((volatile jint*)dest); -} - -inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { +template <> +inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) { // alternative for InterlockedExchange __asm { mov eax, exchange_value; @@ -193,16 +121,9 @@ } } -inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { - return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); -} - -inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { - return (void*)xchg((jint)exchange_value, (volatile jint*)dest); -} - #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) { +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) { // alternative for InterlockedCompareExchange __asm { mov edx, dest @@ -212,7 +133,8 @@ } } -inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) { +template <> +inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) { // alternative for InterlockedCompareExchange __asm { mov edx, dest @@ -222,11 +144,12 @@ } } -inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) { - jint ex_lo = (jint)exchange_value; - jint ex_hi = *( ((jint*)&exchange_value) + 1 ); - jint cmp_lo = (jint)compare_value; - jint cmp_hi = *( ((jint*)&compare_value) + 1 ); +template <> +inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) { + int32_t ex_lo = (int32_t)exchange_value; + int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 ); + int32_t cmp_lo = (int32_t)compare_value; + int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 ); __asm { push ebx push edi @@ -241,17 +164,10 @@ } } -inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) { - return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) { - return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order); -} - -inline jlong Atomic::load(const volatile jlong* src) { - volatile jlong dest; - volatile jlong* pdest = &dest; +template <> +inline int64_t Atomic::specialized_load(const volatile int64_t* src) { + volatile int64_t dest; + volatile int64_t* pdest = &dest; __asm { mov eax, src fild qword ptr [eax] @@ -261,8 +177,9 @@ return dest; } -inline void Atomic::store(jlong store_value, volatile jlong* dest) { - volatile jlong* src = &store_value; +template <> +inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) { + volatile int64_t* src = &store_value; __asm { mov eax, src fild qword ptr [eax] @@ -271,10 +188,6 @@ } } -inline void Atomic::store(jlong 
store_value, jlong* dest) { - Atomic::store(store_value, (volatile jlong*)dest); -} - #endif // AMD64 #pragma warning(default: 4035) // Enables warnings reporting missing return statement --- old/src/os_cpu/windows_x86/vm/os_windows_x86.cpp 2017-07-14 18:05:46.582232485 +0200 +++ new/src/os_cpu/windows_x86/vm/os_windows_x86.cpp 2017-07-14 18:05:46.426232490 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -217,17 +217,17 @@ // Atomics and Stub Functions -typedef jint xchg_func_t (jint, volatile jint*); -typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*); -typedef jint cmpxchg_func_t (jint, volatile jint*, jint); -typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte); -typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong); -typedef jint add_func_t (jint, volatile jint*); -typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*); +typedef int32_t xchg_func_t (int32_t, volatile int32_t*); +typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*); +typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t); +typedef int8_t cmpxchg_byte_func_t (int8_t, volatile int8_t*, int8_t); +typedef int64_t cmpxchg_long_func_t (int64_t, volatile int64_t*, int64_t); +typedef int32_t add_func_t (int32_t, volatile int32_t*); +typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*); #ifdef AMD64 -jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { +int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) { // try to use the stub: xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); @@ -237,7 +237,7 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jint old_value = *dest; + int32_t old_value = *dest; *dest = exchange_value; return old_value; } @@ -258,7 +258,7 @@ } -jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) { +int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) { // try to use the stub: cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); @@ -268,13 +268,13 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jint old_value = *dest; + int32_t old_value = *dest; if (old_value == compare_value) *dest = exchange_value; return old_value; } -jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { +int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) { // try to use the stub: cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry()); @@ -284,7 +284,7 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jbyte old_value = *dest; + int8_t old_value = *dest; if (old_value == compare_value) *dest = exchange_value; return old_value; @@ -292,7 +292,7 @@ #endif // AMD64 -jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { +int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) { // try to use the stub: cmpxchg_long_func_t* func = 
CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); @@ -302,7 +302,7 @@ } assert(Threads::number_of_threads() == 0, "for bootstrap only"); - jlong old_value = *dest; + int64_t old_value = *dest; if (old_value == compare_value) *dest = exchange_value; return old_value; @@ -310,7 +310,7 @@ #ifdef AMD64 -jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { +int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) { // try to use the stub: add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); --- old/src/os_cpu/windows_x86/vm/os_windows_x86.hpp 2017-07-14 18:05:47.378232457 +0200 +++ new/src/os_cpu/windows_x86/vm/os_windows_x86.hpp 2017-07-14 18:05:47.226232462 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,32 +29,32 @@ // NOTE: we are back in class os here, not win32 // #ifdef AMD64 - static jint (*atomic_xchg_func) (jint, volatile jint*); - static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*); + static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*); + static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*); - static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); - static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte); - static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong); + static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t); + static int8_t (*atomic_cmpxchg_byte_func) (int8_t, volatile int8_t*, int8_t); + static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t); - static jint (*atomic_add_func) (jint, volatile jint*); - static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*); + static int32_t (*atomic_add_func) (int32_t, volatile int32_t*); + static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*); - static jint atomic_xchg_bootstrap (jint, volatile jint*); - static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*); + static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*); + static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*); - static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); - static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte); + static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t); + static int8_t atomic_cmpxchg_byte_bootstrap(int8_t, volatile int8_t*, int8_t); #else - static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong); + static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t); #endif // AMD64 - static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); + static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t); #ifdef AMD64 - static jint atomic_add_bootstrap (jint, volatile jint*); - static intptr_t atomic_add_ptr_bootstrap (intptr_t, volatile intptr_t*); + static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*); + static intptr_t atomic_add_ptr_bootstrap (intptr_t, volatile intptr_t*); #endif // AMD64 static void setup_fpu(); --- old/src/share/vm/runtime/atomic.hpp 2017-07-14 18:05:48.118232431 +0200 +++ new/src/share/vm/runtime/atomic.hpp 
2017-07-14 18:05:47.966232437 +0200 @@ -26,7 +26,15 @@ #define SHARE_VM_RUNTIME_ATOMIC_HPP #include "memory/allocation.hpp" +#include "metaprogramming/conditional.hpp" +#include "metaprogramming/enableIf.hpp" +#include "metaprogramming/integerTypes.hpp" +#include "metaprogramming/isIntegral.hpp" +#include "metaprogramming/isPointer.hpp" +#include "metaprogramming/isSame.hpp" +#include "metaprogramming/removePointer.hpp" #include "utilities/align.hpp" +#include "utilities/debug.hpp" #include "utilities/macros.hpp" enum cmpxchg_memory_order { @@ -36,9 +44,51 @@ }; class Atomic : AllStatic { + template <typename T> class Never: public FalseType {}; + + template <typename T> + inline static void specialized_store(T store_value, volatile T* dest) { + STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses? + (void)const_cast<T&>(*dest = store_value); + } + + template <typename T> + inline static T specialized_load(const volatile T* dest) { + STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses? + return *dest; + } + + template <typename T> + inline static T specialized_add(T add_value, volatile T* dest) { + STATIC_ASSERT(Never<T>::value); + return add_value; + } + + template <typename T> + inline static void specialized_inc(volatile T* dest) { + add(1, dest); + } + + template <typename T> + inline static void specialized_dec(volatile T* dest) { + add(-1, dest); + } + + template <typename T> + inline static T specialized_xchg(T exchange_value, volatile T* dest) { + STATIC_ASSERT(Never<T>::value); + return exchange_value; + } + + template <typename T> + inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) { + STATIC_ASSERT(Never<T>::value); + return exchange_value; + } + public: - // Atomic operations on jlong types are not available on all 32-bit - // platforms. If atomic ops on jlongs are defined here they must only + // Atomic operations on 64-bit types are not available on all 32-bit + // platforms. If atomic ops on 64-bit types are defined here they must only // be used from code that verifies they are available at runtime and // can provide an alternative action if not - see supports_cx8() for // a means to test availability. @@ -56,108 +106,229 @@ // we can prove that a weaker form is sufficiently safe.
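The private hooks above are the whole porting surface: each os_cpu header specializes specialized_* for the widths it supports, and a width that is left unspecialized trips STATIC_ASSERT(Never<T>::value) at compile time instead of silently falling back to a non-atomic operation. A standalone toy illustrating the same trap-plus-specialization technique (not part of the patch; __sync_add_and_fetch only stands in for a port's real instruction sequence):

#include <stdint.h>

template <typename T> struct ExampleNever { enum { value = 0 }; };

struct ExampleAtomic {
  template <typename T>
  static T add(T add_value, volatile T* dest) {
    // Primary template is a compile-time trap; it fires only if a port forgot the specialization.
    static_assert(ExampleNever<T>::value, "no atomic add specialization for this type");
    return add_value;
  }
};

// A port enables a width by specializing the trap away.
template <>
inline int32_t ExampleAtomic::add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  return __sync_add_and_fetch(dest, add_value);  // GCC builtin as a stand-in
}

int main() {
  volatile int32_t counter = 0;
  return ExampleAtomic::add(int32_t(5), &counter) == 5 ? 0 : 1;  // an int64_t operand would fail to compile
}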
// Atomically store to a location - inline static void store (jbyte store_value, jbyte* dest); - inline static void store (jshort store_value, jshort* dest); - inline static void store (jint store_value, jint* dest); - // See comment above about using jlong atomics on 32-bit platforms - inline static void store (jlong store_value, jlong* dest); - inline static void store_ptr(intptr_t store_value, intptr_t* dest); - inline static void store_ptr(void* store_value, void* dest); - - inline static void store (jbyte store_value, volatile jbyte* dest); - inline static void store (jshort store_value, volatile jshort* dest); - inline static void store (jint store_value, volatile jint* dest); - // See comment above about using jlong atomics on 32-bit platforms - inline static void store (jlong store_value, volatile jlong* dest); - inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest); - inline static void store_ptr(void* store_value, volatile void* dest); - - // See comment above about using jlong atomics on 32-bit platforms - inline static jlong load(const volatile jlong* src); + // See comment above about using 64-bit atomics on 32-bit platforms + template <typename T, typename U> + inline static void store(T store_value, volatile U* dest); + + // The store_ptr() member functions are deprecated. Use store() instead. + static void store_ptr(intptr_t store_value, volatile intptr_t* dest) { + store(store_value, dest); + } + + static void store_ptr(void* store_value, volatile void* dest) { + store((intptr_t)store_value, (volatile intptr_t*)dest); + } + + // Atomically load from a location + // See comment above about using 64-bit atomics on 32-bit platforms + template <typename T> + inline static T load(volatile T* src); // Atomically add to a location. Returns updated value. add*() provide: // add-value-to-dest - inline static jshort add (jshort add_value, volatile jshort* dest); - inline static jint add (jint add_value, volatile jint* dest); - inline static size_t add (size_t add_value, volatile size_t* dest); - inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest); - inline static void* add_ptr(intptr_t add_value, volatile void* dest); + // add(I1 v, I* d) + // add(I1 v, P* d) + // where I, I1 are integral types, P is a pointer type. + // Functional behavior is modelled on *dest += add_value. + template <typename T, typename U> + inline static U add(T add_value, volatile U* dst); + + template <typename T, typename U> + inline static U* add(T add_value, U* volatile* dst); + + // The add_ptr() member functions are deprecated. Use add() instead. + static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) { + return add(add_value, dest); + } + + static void* add_ptr(intptr_t add_value, volatile void* dest) { + return (void*)add(add_value, (volatile intptr_t*)dest); + } // Atomically increment location. inc*() provide: // increment-dest - inline static void inc (volatile jint* dest); - inline static void inc (volatile jshort* dest); - inline static void inc (volatile size_t* dest); - inline static void inc_ptr(volatile intptr_t* dest); - inline static void inc_ptr(volatile void* dest); + // Functional behavior is modelled on *dest++ + template <typename T> + inline static void inc(volatile T* dest); + + template <typename T> + inline static void inc(T* volatile* dest); + + // The inc_ptr member functions are deprecated. Use inc() instead. + static void inc_ptr(volatile intptr_t* dest) { + inc(dest); + } + + static void inc_ptr(volatile void* dest) { + inc((volatile intptr_t*)dest); + } // Atomically decrement a location.
dec*() provide: // decrement-dest - inline static void dec (volatile jint* dest); - inline static void dec (volatile jshort* dest); - inline static void dec (volatile size_t* dest); - inline static void dec_ptr(volatile intptr_t* dest); - inline static void dec_ptr(volatile void* dest); + // Functional behavior is modelled on *dest-- + template + inline static void dec(volatile T* dest); + + template + inline static void dec(T* volatile* dest); + + // The dec_ptr member functions are deprecated. Use dec() instead. + static void dec_ptr(volatile intptr_t* dest) { + dec(dest); + } + + static void dec_ptr(volatile void* dest) { + dec((volatile intptr_t*)dest); + } // Performs atomic exchange of *dest with exchange_value. Returns old // prior value of *dest. xchg*() provide: // exchange-value-with-dest - inline static jint xchg (jint exchange_value, volatile jint* dest); - inline static unsigned int xchg (unsigned int exchange_value, volatile unsigned int* dest); - inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest); - inline static void* xchg_ptr(void* exchange_value, volatile void* dest); + template + inline static U xchg(T exchange_value, volatile U* dest); + + // The xchg_ptr() member functions are deprecated. Use xchg() instead. + static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { + return xchg(exchange_value, dest); + } + + static void* xchg_ptr(void* exchange_value, volatile void* dest) { + return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest); + } // Performs atomic compare of *dest and compare_value, and exchanges // *dest with exchange_value if the comparison succeeded. Returns prior // value of *dest. cmpxchg*() provide: // compare-and-exchange - inline static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order = memory_order_conservative); - inline static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order = memory_order_conservative); - // See comment above about using jlong atomics on 32-bit platforms - inline static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order = memory_order_conservative); - inline static unsigned int cmpxchg (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative); - inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative); - inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order = memory_order_conservative); + // See comment above about using 64-bit atomics on 32-bit platforms + template + inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order = memory_order_conservative); + + // The cmpxchg_ptr member functions are deprecated. Use cmpxchg() instead. 
+ inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, + intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) { + return cmpxchg(exchange_value, dest, compare_value, order); + + } + + inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, + void* compare_value, cmpxchg_memory_order order = memory_order_conservative) { + return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order); + } }; -// platform specific in-line definitions - must come before shared definitions +// internal implementation -#include OS_CPU_HEADER(atomic) +template +inline void Atomic::store(T store_value, volatile U* dest) { + typedef typename IntegerTypes::Signed::type Raw; + U store_value_cast = store_value; + specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast(dest)); +} -// shared in-line definitions +template +inline T Atomic::load(volatile T* src) { + typedef typename IntegerTypes::Signed::type Raw; + return IntegerTypes::cast(specialized_load(reinterpret_cast(src))); +} -// size_t casts... -#if (SIZE_MAX != UINTPTR_MAX) -#error size_t is not WORD_SIZE, interesting platform, but missing implementation here -#endif +template +inline U Atomic::add(T add_value, volatile U* dst) { + STATIC_ASSERT(IsIntegral::value); + STATIC_ASSERT(IsIntegral::value); + typedef typename IntegerTypes::Signed::type Raw; + // Allow -Wconversion or the like to complain about unsafe conversions. + U value = add_value; + Raw raw_value = IntegerTypes::cast_to_signed(value); + Raw result = specialized_add(raw_value, reinterpret_cast(dst)); + return IntegerTypes::cast(result); +} + +template +inline U* Atomic::add(T add_value, U* volatile* dst) { + STATIC_ASSERT(IsIntegral::value); + typedef typename IntegerTypes::Signed::type Raw; + ptrdiff_t value = add_value; + Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U)); + Raw result = specialized_add(raw_value, reinterpret_cast(dst)); + return IntegerTypes::cast(result); +} + +template +inline void Atomic::inc(volatile T* src) { + STATIC_ASSERT(IsIntegral::value); + typedef typename IntegerTypes::Signed::type Raw; + specialized_inc(reinterpret_cast(src)); +} -inline size_t Atomic::add(size_t add_value, volatile size_t* dest) { - return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest); +template +inline void Atomic::inc(T* volatile* src) { + if (sizeof(T) != 1) { + add(1, src); + } else { + typedef typename IntegerTypes::Signed::type Raw; + specialized_inc(reinterpret_cast(src)); + } } -inline void Atomic::inc(volatile size_t* dest) { - inc_ptr((volatile intptr_t*) dest); +template +inline void Atomic::dec(volatile T* src) { + STATIC_ASSERT(IsIntegral::value); + typedef typename IntegerTypes::Signed::type Raw; + specialized_dec(reinterpret_cast(src)); } -inline void Atomic::dec(volatile size_t* dest) { - dec_ptr((volatile intptr_t*) dest); +template +inline void Atomic::dec(T* volatile* src) { + if (sizeof(T) != 1) { + add(-1, src); + } else { + typedef typename IntegerTypes::Signed::type Raw; + specialized_dec(reinterpret_cast(src)); + } } +template +inline U Atomic::xchg(T exchange_value, volatile U* dest) { + typedef typename IntegerTypes::Signed::type Raw; + U exchange_value_cast = exchange_value; + Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast), + reinterpret_cast(dest)); + return IntegerTypes::cast(result); +} + +template +inline U Atomic::cmpxchg(T exchange_value, volatile U* 
dest, V compare_value, cmpxchg_memory_order order) { + typedef typename IntegerTypes::Signed::type Raw; + U exchange_value_cast = exchange_value; + U compare_value_cast = compare_value; + Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast), + reinterpret_cast(dest), + IntegerTypes::cast_to_signed(compare_value_cast), order); + return IntegerTypes::cast(result); +} + +// platform specific in-line definitions - must come before shared definitions + +#include OS_CPU_HEADER(atomic) + +// shared in-line definitions + #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE /* - * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg - * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition + * This is the default implementation of byte-sized cmpxchg. It emulates 8-bit-sized cmpxchg + * in terms of 32-bit-sized cmpxchg. Platforms may override this by defining their own inline definition * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific * implementation to be used instead. */ -inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, - jbyte compare_value, cmpxchg_memory_order order) { - STATIC_ASSERT(sizeof(jbyte) == 1); - volatile jint* dest_int = - reinterpret_cast(align_down(dest, sizeof(jint))); +template <> +inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, + int8_t compare_value, cmpxchg_memory_order order) { + volatile int32_t* dest_int = + reinterpret_cast(align_down(dest, sizeof(int32_t))); size_t offset = pointer_delta(dest, dest_int, 1); - jint cur = *dest_int; - jbyte* cur_as_bytes = reinterpret_cast(&cur); + int32_t cur = *dest_int; + int8_t* cur_as_bytes = reinterpret_cast(&cur); // current value may not be what we are looking for, so force it // to that value so the initial cmpxchg will fail if it is different @@ -167,17 +338,17 @@ // barriers even on initial failure do { // value to swap in matches current value ... - jint new_value = cur; - // ... except for the one jbyte we want to update - reinterpret_cast(&new_value)[offset] = exchange_value; + int32_t new_value = cur; + // ... 
except for the one byte we want to update + reinterpret_cast(&new_value)[offset] = exchange_value; - jint res = cmpxchg(new_value, dest_int, cur, order); + int32_t res = cmpxchg(new_value, dest_int, cur, order); if (res == cur) break; // success - // at least one jbyte in the jint changed value, so update - // our view of the current jint + // at least one byte in the int changed value, so update + // our view of the current int cur = res; - // if our jbyte is still as cur we loop and try again + // if our byte is still as cur we loop and try again } while (cur_as_bytes[offset] == compare_value); return cur_as_bytes[offset]; @@ -185,20 +356,8 @@ #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE -inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) { - assert(sizeof(unsigned int) == sizeof(jint), "more work to do"); - return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest); -} - -inline unsigned Atomic::cmpxchg(unsigned int exchange_value, - volatile unsigned int* dest, unsigned int compare_value, - cmpxchg_memory_order order) { - assert(sizeof(unsigned int) == sizeof(jint), "more work to do"); - return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest, - (jint)compare_value, order); -} - -inline jshort Atomic::add(jshort add_value, volatile jshort* dest) { +template <> +inline int16_t Atomic::specialized_add(int16_t add_value, volatile int16_t* dest) { // Most platforms do not support atomic add on a 2-byte value. However, // if the value occupies the most significant 16 bits of an aligned 32-bit // word, then we can do this with an atomic add of (add_value << 16) @@ -210,20 +369,22 @@ // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment. #ifdef VM_LITTLE_ENDIAN assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); - jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1)); + int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1)); #else assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); - jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest)); + int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest)); #endif - return (jshort)(new_value >> 16); // preserves sign + return (int16_t)(new_value >> 16); // preserves sign } -inline void Atomic::inc(volatile jshort* dest) { - (void)add(1, dest); +template <> +inline void Atomic::specialized_inc(volatile int16_t* dest) { + (void)add(int16_t(1), dest); } -inline void Atomic::dec(volatile jshort* dest) { - (void)add(-1, dest); +template <> +inline void Atomic::specialized_dec(volatile int16_t* dest) { + (void)add(int16_t(-1), dest); } #endif // SHARE_VM_RUNTIME_ATOMIC_HPP --- /dev/null 2017-03-07 11:44:12.271151064 +0100 +++ new/src/share/vm/metaprogramming/integerTypes.hpp 2017-07-14 18:05:48.778232408 +0200 @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_METAPROGRAMMING_INTEGERTYPES_HPP +#define SHARE_VM_METAPROGRAMMING_INTEGERTYPES_HPP + +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" +#include "metaprogramming/decay.hpp" +#include "metaprogramming/integralConstant.hpp" +#include "metaprogramming/isFloatingPoint.hpp" +#include "metaprogramming/isPointer.hpp" +#include "metaprogramming/isSigned.hpp" +#include "metaprogramming/isIntegral.hpp" + +class IntegerTypes : public AllStatic { +public: + // Metafunction returning a canonical type of the same size and signed as T. + // T must be an integral type. The canonical types are guaranteed to be + // the [u]intN_t types from stdint.h + template struct Canonical; + + // Metafunction returning the canonical signed integral type of the + // given size. + template struct SignedTypeOfSize; + + // Metafunction returning the canonical unsigned integral type of + // the given size. + template struct UnsignedTypeOfSize; + + // Metafunction returning the canonical signed integral type of the + // same size as T. T must be an integral, floating point, or + // pointer type. + template struct Signed; + + // Metafunction returning the canonical unsigned integral type of + // the same size as T. T must be an integral, floating point, or + // pointer type. + template struct Unsigned; + + // Return a signed integral value with the same representation as x. + // T must be an integral, floating point, or pointer type. + // If T is a signed type, then x == cast_to_signed(x). + template + static typename Signed::type cast_to_signed(T x); + + // Return an unsigned integral value with the same representation as x. + // T must be an integral, floating point, or pointer type. + // If T is an unsigned type, then x == cast_to_unsigned(x). + template + static typename Unsigned::type cast_to_unsigned(T x); + + // Return a value with the same representation as x. + // T must be an integral, floating point, or pointer type. + // U must be an integral type. T and U must be of the same size. + // If T and U are both signed or both unsigned integral types, then + // x == cast(x). + template + static T cast(U x); + +private: + template::value> + struct CanonicalImpl; + + template::value, bool is_float = IsFloatingPoint::value> + struct TypeForSize; + + template::value, bool from_float = IsFloatingPoint::value> + struct CastTo; + + template::value, + bool to_float = IsFloatingPoint::value> + struct Cast; + + template static T cast_integral(U x); + template static T cast_floating_point(U x); +}; + +// Convert between different integral types of the same size. +// See C++03 3.10/15 for discussion of reinterpret_cast to a reference +// as a means for converting integral types while keeping the representation. +template +inline T IntegerTypes::cast_integral(U x) { + STATIC_ASSERT(sizeof(T) == sizeof(U)); + return reinterpret_cast(x); +} + +// Convert between an integral type and a floating point type of the +// same size. 
The only truly correct way to do this is with memcpy. +// Both the union trick and type punning via casts are undefined +// behavior. gcc generates exactly the same code for all three methods, +// except where the UB of type punning causes it to go off into the weeds. +// (gcc explicitly supports the union trick.) Other compilers may vary. +// In particular, not all compilers do a good job with the memcpy. +template +inline T IntegerTypes::cast_floating_point(U x) { + STATIC_ASSERT(sizeof(T) == sizeof(U)); + T result; + memcpy(&result, &x, sizeof(x)); + return result; +} + +////////////////////////////////////////////////////////////////////////////// +// SignedTypeOfSize and UnsignedTypeOfSize + +#define DEFINE_CANONICAL_SIGNED_TYPE(T) \ + template<> \ + struct IntegerTypes::SignedTypeOfSize : public AllStatic { \ + typedef T type; \ + }; + +#define DEFINE_CANONICAL_UNSIGNED_TYPE(T) \ + template<> \ + struct IntegerTypes::UnsignedTypeOfSize : public AllStatic { \ + typedef T type; \ + }; + +#define DEFINE_INTEGER_TYPES_OF_SIZE(NBITS) \ + DEFINE_CANONICAL_SIGNED_TYPE(int ## NBITS ## _t) \ + DEFINE_CANONICAL_UNSIGNED_TYPE(uint ## NBITS ## _t) + +DEFINE_INTEGER_TYPES_OF_SIZE(8) +DEFINE_INTEGER_TYPES_OF_SIZE(16) +DEFINE_INTEGER_TYPES_OF_SIZE(32) +DEFINE_INTEGER_TYPES_OF_SIZE(64) + +#undef DEFINE_INTEGER_TYPES_OF_SIZE +#undef DEFINE_CANONICAL_SIGNED_TYPE +#undef DEFINE_CANONICAL_UNSIGNED_TYPE + +////////////////////////////////////////////////////////////////////////////// +// Canonical + +template +struct IntegerTypes::CanonicalImpl + : public SignedTypeOfSize +{ }; + +template +struct IntegerTypes::CanonicalImpl + : public UnsignedTypeOfSize +{ }; + +template +struct IntegerTypes::Canonical : public CanonicalImpl { + STATIC_ASSERT((IsIntegral::value)); +}; + +////////////////////////////////////////////////////////////////////////////// +// Signed and Unsigned + +// For other types, use the canonical type, ensuring T is integral. +template +struct IntegerTypes::TypeForSize : public Canonical { }; + +// For floating point types, use the type. +template +struct IntegerTypes::TypeForSize : public AllStatic { + typedef T type; +}; + +// For pointer types, use void*. +template +struct IntegerTypes::TypeForSize : public AllStatic { + typedef void* type; +}; + +template +struct IntegerTypes::Signed + : public SignedTypeOfSize::type)> +{ }; + +template +struct IntegerTypes::Unsigned + : public UnsignedTypeOfSize::type)> +{ }; + +////////////////////////////////////////////////////////////////////////////// +// cast(x) +// +// Cast from an integral type to an integral, floating point, or pointer type. +// Cast + +// Cast integral value to some different type of integral value. +template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { + // Canonicalization verifies T is integral. + typedef typename Canonical::type canonical_type; + return static_cast(cast_integral(x)); + } +}; + +// Cast integral value to floating point. +template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { return cast_floating_point(x); } +}; + +// Cast integral value to pointer. +template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { return (T)((void*)x); } +}; + +// Same integral type is identity conversion. +template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + T operator()(T x) const { return x; } +}; + +// Give an informative error if the sizes differ. 
+template +struct IntegerTypes::Cast VALUE_OBJ_CLASS_SPEC { + STATIC_ASSERT(sizeof(T) == sizeof(U)); +}; + +template +inline T IntegerTypes::cast(U x) { + // Canonicalization verifies U is integral. + typedef typename Canonical::type parameter_type; + return Cast()(static_cast(x)); +} + +////////////////////////////////////////////////////////////////////////////// +// cast_to_signed(x) +// cast_to_unsigned(x) +// +// Cast from an integral, floating point, or pointer type to an integral type. + +// Cast integral to different integral. +template +struct IntegerTypes::CastTo VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { return cast_integral(x); } +}; + +// Cast floating point to integral value. +template +struct IntegerTypes::CastTo VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { return cast_floating_point(x); } +}; + +// Cast pointer to integral value. +template +struct IntegerTypes::CastTo VALUE_OBJ_CLASS_SPEC { + T operator()(U x) const { return reinterpret_cast(x); } +}; + +// Identity conversion +template +struct IntegerTypes::CastTo VALUE_OBJ_CLASS_SPEC { + T operator()(T x) const { return x; } +}; + +template +inline typename IntegerTypes::Signed::type +IntegerTypes::cast_to_signed(T x) { + typedef typename Signed::type result_type; + return CastTo()(x); +} + +template +inline typename IntegerTypes::Unsigned::type +IntegerTypes::cast_to_unsigned(T x) { + typedef typename Unsigned::type result_type; + return CastTo()(x); +} + +#endif // SHARE_VM_METAPROGRAMMING_INTEGERTYPES_HPP --- /dev/null 2017-03-07 11:44:12.271151064 +0100 +++ new/test/native/metaprogramming/test_integerTypes.cpp 2017-07-14 18:05:49.542232382 +0200 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "memory/allocation.hpp" +#include "metaprogramming/integerTypes.hpp" +#include "metaprogramming/isSame.hpp" +#include "unittest.hpp" +#include "utilities/debug.hpp" + +class IntegerTypesTest: AllStatic { + template + class TestIntegers: AllStatic { + static const bool _cts_unsigned_type_is_signed_type = IsSame::type, SignedType>::value; + STATIC_ASSERT(_cts_unsigned_type_is_signed_type); + static const bool _ctu_signed_type_is_unsigned_type = IsSame::type, UnsignedType>::value; + STATIC_ASSERT(_ctu_signed_type_is_unsigned_type); + }; + + const TestIntegers TestByte; + const TestIntegers TestShort; + const TestIntegers TestInt; + const TestIntegers TestLong; + + typedef IntegerTypes::Canonical::type CanonicalIntPtr; + typedef IntegerTypes::Canonical::type CanonicalUIntPtr; + + static const bool _cts_voidptr_is_intptrt = IsSame::type, CanonicalIntPtr>::value; + STATIC_ASSERT(_cts_voidptr_is_intptrt); + static const bool _ctu_voidptr_is_uintptrt = IsSame::type, CanonicalUIntPtr>::value; + STATIC_ASSERT(_ctu_voidptr_is_uintptrt); + + class A VALUE_OBJ_CLASS_SPEC { + intptr_t _value; + public: + operator void* () { return reinterpret_cast(_value); } + A(void* arg) : _value(reinterpret_cast(arg)) {} + }; + + static const bool _cts_cvptrcv_is_intptrt = IsSame::type, CanonicalIntPtr>::value; + STATIC_ASSERT(_cts_cvptrcv_is_intptrt); + static const bool _ctu_cvptrcv_is_uintptrt = IsSame::type, CanonicalUIntPtr>::value; + STATIC_ASSERT(_ctu_cvptrcv_is_uintptrt); + + static const bool _cts_float_is_int = IsSame::type, int32_t>::value; + STATIC_ASSERT(_cts_float_is_int); + static const bool _ctu_float_is_uint = IsSame::type, uint32_t>::value; + STATIC_ASSERT(_ctu_float_is_uint); + + static const bool _cts_double_is_long = IsSame::type, int64_t>::value; + STATIC_ASSERT(_cts_double_is_long); + static const bool _ctu_double_is_ulong = IsSame::type, uint64_t>::value; + STATIC_ASSERT(_ctu_double_is_ulong); + + static void test_cast() { + (void)IntegerTypes::cast(intptr_t(0)); + (void)IntegerTypes::cast(int32_t(0)); + (void)IntegerTypes::cast(int64_t(0)); + (void)IntegerTypes::cast(int64_t(0)); + } +}; + +TEST(IntegerTypesRTest, round_trip_int) { + int sfive = 5; + int mfive = -5; + uint ufive = 5u; + + EXPECT_EQ(sfive, IntegerTypes::cast(IntegerTypes::cast_to_signed(sfive))); + EXPECT_EQ(sfive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(sfive))); + + EXPECT_EQ(mfive, IntegerTypes::cast(IntegerTypes::cast_to_signed(mfive))); + EXPECT_EQ(mfive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(mfive))); + + EXPECT_EQ(ufive, IntegerTypes::cast(IntegerTypes::cast_to_signed(ufive))); + EXPECT_EQ(ufive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(ufive))); +} + +TEST(IntegerTypesRTest, round_trip_float) { + float ffive = 5.0f; + double dfive = 5.0; + + EXPECT_EQ(ffive, IntegerTypes::cast(IntegerTypes::cast_to_signed(ffive))); + EXPECT_EQ(ffive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(ffive))); + + EXPECT_EQ(dfive, IntegerTypes::cast(IntegerTypes::cast_to_signed(dfive))); + EXPECT_EQ(dfive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(dfive))); +} + +TEST(IntegerTypesRTest, round_trip_ptr) { + int five = 5; + int* pfive = &five; + const int* cpfive = &five; + + EXPECT_EQ(pfive, IntegerTypes::cast(IntegerTypes::cast_to_signed(pfive))); + EXPECT_EQ(pfive, IntegerTypes::cast(IntegerTypes::cast_to_unsigned(pfive))); + + EXPECT_EQ(cpfive, IntegerTypes::cast(IntegerTypes::cast_to_signed(cpfive))); + EXPECT_EQ(cpfive, 
IntegerTypes::cast(IntegerTypes::cast_to_unsigned(cpfive))); +} --- /dev/null 2017-03-07 11:44:12.271151064 +0100 +++ new/test/native/runtime/test_atomic.cpp 2017-07-14 18:05:50.298232355 +0200 @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "runtime/atomic.hpp" +#include "utilities/macros.hpp" +#include "unittest.hpp" + +#include + +struct AtomicShortAddTest: public testing::Test { + ATOMIC_SHORT_PAIR( + volatile int16_t _atomic, + int16_t _not_atomic + ); + + void add_max() { + Atomic::add(std::numeric_limits::max(), &_atomic); + } + + void sub_max() { + Atomic::add(std::numeric_limits::min(), &_atomic); + } + + AtomicShortAddTest() : _atomic(0), _not_atomic(0) {} +}; + +// This test tests whether the neighbouring short will be +// touched by an overflow. +TEST_VM_F(AtomicShortAddTest, test_short_add_overflow) { + EXPECT_EQ(_atomic, 0); + EXPECT_EQ(_not_atomic, 0); + add_max(); + EXPECT_EQ(_atomic, std::numeric_limits::max()); + EXPECT_EQ(_not_atomic, 0); + add_max(); + EXPECT_EQ(_not_atomic, 0); +} + +// This test tests whether the neighbouring short will be +// touched by an underflow. 
+TEST_VM_F(AtomicShortAddTest, test_short_add_underflow) { + EXPECT_EQ(0, _atomic); + EXPECT_EQ(_not_atomic, 0); + sub_max(); + EXPECT_EQ(_atomic, std::numeric_limits::min()); + EXPECT_EQ(_not_atomic, 0); + sub_max(); + EXPECT_EQ(_not_atomic, 0); +} + +class AtomicIntegerTest: public testing::Test { +public: + template + void test_add() { + volatile T value = 0; + T result; + const T zero = 0; + const T one = 1; + const T max = std::numeric_limits::max(); + const T min = std::numeric_limits::min(); + + EXPECT_EQ(zero, value); + result = Atomic::add(1, &value); + EXPECT_EQ(one, value); + EXPECT_EQ(result, value); + result = Atomic::add(-1, &value); + EXPECT_EQ(zero, value); + EXPECT_EQ(result, value); + result = Atomic::add(max, &value); + EXPECT_EQ(max, value); + EXPECT_EQ(result, value); + } + + template + void test_inc_dec() { + volatile T value = 0; + const T zero = 0; + const T one = 1; + const T max = std::numeric_limits::max(); + const T min = std::numeric_limits::min(); + + EXPECT_EQ(zero, value); + Atomic::inc(&value); + EXPECT_EQ(one, value); + Atomic::dec(&value); + EXPECT_EQ(zero, value); + Atomic::add(max - 1, &value); + EXPECT_EQ(max - 1, value); + Atomic::inc(&value); + EXPECT_EQ(max, value); + } + + template + void test_cas() { + const T max = std::numeric_limits::max(); + const T min = std::numeric_limits::min(); + + volatile T value = min; + + // Successful cas + T result = Atomic::cmpxchg(max, &value, min); + EXPECT_EQ(max, value); + EXPECT_EQ(min, result); + + // Unsuccessful cas + result = Atomic::cmpxchg(max, &value, min); + EXPECT_EQ(max, value); + EXPECT_EQ(max, result); + + // Another successful cas + result = Atomic::cmpxchg(min, &value, max); + EXPECT_EQ(min, value); + EXPECT_EQ(max, result); + } + + template + void test_swap() { + const T zero = 0; + const T one = 1; + const T max = std::numeric_limits::max(); + const T min = std::numeric_limits::min(); + + volatile T value = zero; + + T result = Atomic::xchg(one, &value); + EXPECT_EQ(one, value); + EXPECT_EQ(zero, result); + + value = min; + result = Atomic::xchg(max, &value); + EXPECT_EQ(max, value); + EXPECT_EQ(min, result); + + result = Atomic::xchg(min, &value); + EXPECT_EQ(min, value); + EXPECT_EQ(max, result); + } +}; + +#define GENERATE_ATOMIC_INTEGER_TEST(T) \ +TEST_VM_F(AtomicIntegerTest, atomic_add_##T) { \ + test_add(); \ + test_inc_dec(); \ + test_cas(); \ + test_swap(); \ +} + +GENERATE_ATOMIC_INTEGER_TEST(int32) +GENERATE_ATOMIC_INTEGER_TEST(intptr) +GENERATE_ATOMIC_INTEGER_TEST(uint32) +GENERATE_ATOMIC_INTEGER_TEST(uintptr) + +#undef GENERATE_ATOMIC_INTEGER_TEST + +TEST(AtomicTest, pointer_arithmetic_byte) { + const intptr_t max = std::numeric_limits::max(); + const intptr_t min = std::numeric_limits::min(); + + int8_t *volatile byte_array = NULL; + + Atomic::inc(&byte_array); + EXPECT_EQ(1, IntegerTypes::cast_to_signed(byte_array)); + int8_t* result = Atomic::add(5, &byte_array); + EXPECT_EQ(6, IntegerTypes::cast_to_signed(byte_array)); + EXPECT_EQ(6, IntegerTypes::cast_to_signed(result)); + byte_array = NULL; + result = Atomic::add(max, &byte_array); + EXPECT_EQ(max, IntegerTypes::cast_to_signed(byte_array)); + EXPECT_EQ(max, IntegerTypes::cast_to_signed(result)); + Atomic::dec(&byte_array); + EXPECT_EQ(max - 1, IntegerTypes::cast_to_signed(byte_array)); +} + +TEST(AtomicTest, pointer_arithmetic_pointer) { + const intptr_t max = std::numeric_limits::max() / sizeof(void*); + const intptr_t min = std::numeric_limits::min() / sizeof(void*); + + void **volatile pointer_array = NULL; + + 
Atomic::inc(&pointer_array);
+  EXPECT_EQ(IntegerTypes::cast_to_signed(1 * sizeof(void*)), IntegerTypes::cast_to_signed(pointer_array));
+  void** result = Atomic::add(5, &pointer_array);
+  EXPECT_EQ(IntegerTypes::cast_to_signed(6 * sizeof(void*)), IntegerTypes::cast_to_signed(pointer_array));
+  EXPECT_EQ(IntegerTypes::cast_to_signed(6 * sizeof(void*)), IntegerTypes::cast_to_signed(result));
+  pointer_array = NULL;
+  result = Atomic::add(max, &pointer_array);
+  EXPECT_EQ(IntegerTypes::cast_to_signed(max * sizeof(void*)), IntegerTypes::cast_to_signed(pointer_array));
+  EXPECT_EQ(IntegerTypes::cast_to_signed(max * sizeof(void*)), IntegerTypes::cast_to_signed(result));
+  Atomic::dec(&pointer_array);
+  EXPECT_EQ(IntegerTypes::cast_to_signed((max - 1) * sizeof(void*)), IntegerTypes::cast_to_signed(pointer_array));
+}
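The two pointer-arithmetic tests above exercise the scaling behaviour of the new pointer overload of Atomic::add(): adding n to a T* volatile* location advances the stored pointer by n * sizeof(T) bytes, matching ordinary pointer arithmetic, which is why the expected values are multiples of sizeof(void*). The sketch below is not part of the patch; it models the same semantics with std::atomic on a uintptr_t (file and function names are hypothetical), performing the sizeof scaling by hand in the way the shared Atomic::add(T, U* volatile*) definition multiplies the element count by sizeof(U).

// pointer_scaled_add_sketch.cpp (hypothetical, not part of this changeset)
#include <atomic>
#include <cassert>
#include <stddef.h>
#include <stdint.h>

// Models Atomic::add(n, (U* volatile*)addr): add n elements and return the updated pointer.
template <typename U>
U* atomic_pointer_add(std::atomic<uintptr_t>& dst, ptrdiff_t n) {
  // Scale the element count to a byte offset, mirroring the patch's value * sizeof(U).
  uintptr_t byte_offset = static_cast<uintptr_t>(n) * sizeof(U);
  // fetch_add returns the previous value; the new Atomic::add() returns the updated
  // value, so add the offset to the result before converting back to a pointer.
  return reinterpret_cast<U*>(dst.fetch_add(byte_offset) + byte_offset);
}

int main() {
  std::atomic<uintptr_t> p(0);  // stands in for "void** volatile pointer_array", starting at NULL
  void** after_inc = atomic_pointer_add<void*>(p, 1);
  assert(reinterpret_cast<uintptr_t>(after_inc) == 1 * sizeof(void*));
  void** after_add = atomic_pointer_add<void*>(p, 5);
  assert(reinterpret_cast<uintptr_t>(after_add) == 6 * sizeof(void*));
  (void)after_inc;
  (void)after_add;
  return 0;
}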