< prev index next >

src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp

Print this page
rev 13452 : [mq]: coleen_review1
rev 13458 : imported patch cmpxchg_using_helper


  99 
 100 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
 101   return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
 102 }
 103 
 104 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
 105   return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
 106 }
 107 
 108 inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
 109   return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
 110 }
 111 
 112 #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
 113   template<>                                                            \
 114   template<typename T>                                                  \
 115   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
 116                                                          T volatile* dest, \
 117                                                          T compare_value, \
 118                                                          cmpxchg_memory_order order) const { \
 119     return cmpxchg_using_stub<StubType>(StubName, exchange_value, dest, compare_value); \
 120 }

 121 
// Generate cmpxchg implementations for 1-, 4- and 8-byte operands, each
// backed by the corresponding os:: stub routine.
 122 DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
 123 DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
 124 DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 125 
 126 #undef DEFINE_STUB_CMPXCHG  // helper macro is local to this file
 127 
 128 inline jlong Atomic::load(const volatile jlong* src) { return *src; }  // plain 64-bit load suffices here (AMD64 branch; see #else below)
 129 
 130 #else // !AMD64
 131 
// Atomically add add_value to *dest and return the NEW value.  The result is
// left in eax, which MSVC uses as the integer return value of an __asm-only
// function body.
 132 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
 133   __asm {
 134     mov edx, dest;                   // edx = address of the target word
 135     mov eax, add_value;              // eax = amount to add
 136     mov ecx, eax;                    // save a copy of add_value in ecx
 137     lock xadd dword ptr [edx], eax;  // atomically: *dest += eax, eax = old *dest
 138     add eax, ecx;                    // eax = old value + add_value = new value
 139   }
 140 }




  99 
 100 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
 101   return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
 102 }
 103 
 104 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
 105   return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
 106 }
 107 
 108 inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
 109   return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
 110 }
 111 
// Defines the full specialization Atomic::PlatformCmpxchg<ByteSize> whose call
// operator forwards to the runtime-selected stub StubName (taking StubType
// operands).  STATIC_ASSERT rejects instantiation with a type T whose size
// does not match the stub's operand width.
// NOTE(review): the order argument is not consulted here — presumably the
// stubs provide full-fence semantics; confirm against the stub implementations.
 112 #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)               \
 113   template<>                                                            \
 114   template<typename T>                                                  \
 115   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
 116                                                          T volatile* dest, \
 117                                                          T compare_value, \
 118                                                          cmpxchg_memory_order order) const { \
 119     STATIC_ASSERT(ByteSize == sizeof(T));                               \
 120     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
 121   }
 122 
// Generate cmpxchg implementations for 1-, 4- and 8-byte operands, each
// backed by the corresponding os:: stub routine.
 123 DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
 124 DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
 125 DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 126 
 127 #undef DEFINE_STUB_CMPXCHG  // helper macro is local to this file
 128 
 129 inline jlong Atomic::load(const volatile jlong* src) { return *src; }  // plain 64-bit load suffices here (AMD64 branch; see #else below)
 130 
 131 #else // !AMD64
 132 
// Atomically add add_value to *dest and return the NEW value.  The result is
// left in eax, which MSVC uses as the integer return value of an __asm-only
// function body.
 133 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
 134   __asm {
 135     mov edx, dest;                   // edx = address of the target word
 136     mov eax, add_value;              // eax = amount to add
 137     mov ecx, eax;                    // save a copy of add_value in ecx
 138     lock xadd dword ptr [edx], eax;  // atomically: *dest += eax, eax = old *dest
 139     add eax, ecx;                    // eax = old value + add_value = new value
 140   }
 141 }


< prev index next >