
src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp

rev 13266 : imported patch Atomic_refactoring

*** 40,234 ****
  // necessary (and expensive). We should generate separate cases if
  // this becomes a performance problem.

  #pragma warning(disable: 4035) // Disables warnings reporting missing return statement

- inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
- inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
- inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
-
- inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
-
- inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
- inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
- inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
-
-
- inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
- inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
-
  #ifdef AMD64
- inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
- inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

! inline jint Atomic::add (jint add_value, volatile jint* dest) {
!   return (jint)(*os::atomic_add_func)(add_value, dest);
  }

! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
  }

! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
! }
!
! inline void Atomic::inc (volatile jint* dest) {
!   (void)add (1, dest);
! }
!
! inline void Atomic::inc_ptr(volatile intptr_t* dest) {
!   (void)add_ptr(1, dest);
! }
!
! inline void Atomic::inc_ptr(volatile void* dest) {
!   (void)add_ptr(1, dest);
! }
!
! inline void Atomic::dec (volatile jint* dest) {
!   (void)add (-1, dest);
! }
!
! inline void Atomic::dec_ptr(volatile intptr_t* dest) {
!   (void)add_ptr(-1, dest);
! }
!
! inline void Atomic::dec_ptr(volatile void* dest) {
!   (void)add_ptr(-1, dest);
! }
!
! inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
!   return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
  }

! inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
  }

! inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
!   return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
! }
!
! inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
  }

  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
  }

! inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
  }

- inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-   return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
- }
-
- inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
- }
-
- inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
  #else // !AMD64

! inline jint Atomic::add (jint add_value, volatile jint* dest) {
    __asm {
      mov edx, dest;
      mov eax, add_value;
      mov ecx, eax;
      lock xadd dword ptr [edx], eax;
      add eax, ecx;
    }
  }

! inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
!   return (intptr_t)add((jint)add_value, (volatile jint*)dest);
! }
!
! inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
!   return (void*)add((jint)add_value, (volatile jint*)dest);
! }
!
! inline void Atomic::inc (volatile jint* dest) {
    // alternative for InterlockedIncrement
    __asm {
      mov edx, dest;
      lock add dword ptr [edx], 1;
    }
  }

! inline void Atomic::inc_ptr(volatile intptr_t* dest) {
!   inc((volatile jint*)dest);
! }
!
! inline void Atomic::inc_ptr(volatile void* dest) {
!   inc((volatile jint*)dest);
! }
!
! inline void Atomic::dec (volatile jint* dest) {
    // alternative for InterlockedDecrement
    __asm {
      mov edx, dest;
      lock sub dword ptr [edx], 1;
    }
  }

! inline void Atomic::dec_ptr(volatile intptr_t* dest) {
!   dec((volatile jint*)dest);
! }
!
! inline void Atomic::dec_ptr(volatile void* dest) {
!   dec((volatile jint*)dest);
! }
!
! inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
    // alternative for InterlockedExchange
    __asm {
      mov eax, exchange_value;
      mov ecx, dest;
      xchg eax, dword ptr [ecx];
    }
  }

- inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
- }
-
- inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-   return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
- }
-
  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
      mov cl, exchange_value
      mov al, compare_value
      lock cmpxchg byte ptr [edx], cl
    }
  }

! inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
      mov ecx, exchange_value
      mov eax, compare_value
      lock cmpxchg dword ptr [edx], ecx
    }
  }

! inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
!   jint ex_lo  = (jint)exchange_value;
!   jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
!   jint cmp_lo = (jint)compare_value;
!   jint cmp_hi = *( ((jint*)&compare_value) + 1 );
    __asm {
      push ebx
      push edi
      mov eax, cmp_lo
      mov edx, cmp_hi
--- 40,157 ----
  // necessary (and expensive). We should generate separate cases if
  // this becomes a performance problem.

  #pragma warning(disable: 4035) // Disables warnings reporting missing return statement

  #ifdef AMD64

! template <>
! inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
!   return (int32_t)(*os::atomic_add_func)(add_value, dest);
  }

! template <>
! inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
    return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
  }

! template <>
! inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
!   return (int32_t)(*os::atomic_xchg_func)(exchange_value, dest);
  }

! template <>
! inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
    return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
  }

! template <>
! inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
  }

  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! template <>
! inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
  }

! template <>
! inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
    return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
  }

  #else // !AMD64

! template <>
! inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
    __asm {
      mov edx, dest;
      mov eax, add_value;
      mov ecx, eax;
      lock xadd dword ptr [edx], eax;
      add eax, ecx;
    }
  }

! template <>
! inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
    // alternative for InterlockedIncrement
    __asm {
      mov edx, dest;
      lock add dword ptr [edx], 1;
    }
  }

! template <>
! inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
    // alternative for InterlockedDecrement
    __asm {
      mov edx, dest;
      lock sub dword ptr [edx], 1;
    }
  }

! template <>
! inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
    // alternative for InterlockedExchange
    __asm {
      mov eax, exchange_value;
      mov ecx, dest;
      xchg eax, dword ptr [ecx];
    }
  }

  #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
! template <>
! inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
      mov cl, exchange_value
      mov al, compare_value
      lock cmpxchg byte ptr [edx], cl
    }
  }

! template <>
! inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
    // alternative for InterlockedCompareExchange
    __asm {
      mov edx, dest
      mov ecx, exchange_value
      mov eax, compare_value
      lock cmpxchg dword ptr [edx], ecx
    }
  }

! template <>
! inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
!   int32_t ex_lo  = (int32_t)exchange_value;
!   int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
!   int32_t cmp_lo = (int32_t)compare_value;
!   int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
    __asm {
      push ebx
      push edi
      mov eax, cmp_lo
      mov edx, cmp_hi
*** 239,282 ****
      pop edi
      pop ebx
    }
  }

! inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
!   return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
! }
!
! inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
!   return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
! }
!
! inline jlong Atomic::load(const volatile jlong* src) {
!   volatile jlong dest;
!   volatile jlong* pdest = &dest;
    __asm {
      mov eax, src
      fild qword ptr [eax]
      mov eax, pdest
      fistp qword ptr [eax]
    }
    return dest;
  }

! inline void Atomic::store(jlong store_value, volatile jlong* dest) {
!   volatile jlong* src = &store_value;
    __asm {
      mov eax, src
      fild qword ptr [eax]
      mov eax, dest
      fistp qword ptr [eax]
    }
  }

- inline void Atomic::store(jlong store_value, jlong* dest) {
-   Atomic::store(store_value, (volatile jlong*)dest);
- }
-
  #endif // AMD64

  #pragma warning(default: 4035) // Enables warnings reporting missing return statement

  #endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
--- 162,195 ----
      pop edi
      pop ebx
    }
  }

! template <>
! inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
!   volatile int64_t dest;
!   volatile int64_t* pdest = &dest;
    __asm {
      mov eax, src
      fild qword ptr [eax]
      mov eax, pdest
      fistp qword ptr [eax]
    }
    return dest;
  }

! template <>
! inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
!   volatile int64_t* src = &store_value;
    __asm {
      mov eax, src
      fild qword ptr [eax]
      mov eax, dest
      fistp qword ptr [eax]
    }
  }

  #endif // AMD64

  #pragma warning(default: 4035) // Enables warnings reporting missing return statement

  #endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP
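For readers following the refactoring, the sketch below models how a shared Atomic front-end can keep accepting jint/intptr_t-style operands while the platform file above only provides int32_t/int64_t specializations. It is a minimal illustration under that assumption; the names Canonical, atomic_add, and specialized_add are invented for this example and are not the shared atomic.hpp API.

// Minimal sketch (assumptions, not HotSpot source): a generic front-end that
// canonicalizes any 4- or 8-byte integral operand to int32_t/int64_t before
// dispatching, which is why this platform file now only specializes those
// two widths. Names here are illustrative only.
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

// Map an operand size to the canonical integer type used for specialization.
template <size_t N> struct Canonical;
template <> struct Canonical<4> { typedef int32_t type; };
template <> struct Canonical<8> { typedef int64_t type; };

// Stand-in for the platform specializations shown in the diff above
// (non-atomic here; only the dispatch shape is being illustrated).
template <typename T>
T specialized_add(T add_value, volatile T* dest) {
  T nv = *dest + add_value;
  *dest = nv;
  return nv;
}

// Generic entry point: callers keep passing jint, intptr_t, and friends.
template <typename I>
I atomic_add(I add_value, volatile I* dest) {
  typedef typename Canonical<sizeof(I)>::type C;
  return (I)specialized_add((C)add_value, (volatile C*)dest);
}

int main() {
  intptr_t refcount = 41;
  printf("%ld\n", (long)atomic_add((intptr_t)1, &refcount));  // prints 42
  return 0;
}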