// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
//
// Performance note: On uniprocessors, the 'lock' prefixes are not
// necessary (and expensive). We should generate separate cases if
// this becomes a performance problem.

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};

#ifdef AMD64
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
}
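
// Usage sketch (illustrative only, not part of this header): callers reach
// these specializations through the public Atomic API, which dispatches on
// operand size. Assuming a 4-byte counter, something like
//
//   volatile int32_t _counter = 0;
//   int32_t updated = Atomic::add(&_counter, 1);  // returns the new value
//
// ends up in PlatformAdd<4>::add_and_fetch above, which on AMD64 forwards
// to the os::atomic_add_func stub via add_using_helper<int32_t>.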
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName)                  \
  template<>                                                            \
  template<typename T>                                                  \
  inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
                                                      T volatile* dest, \
                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                               \
    return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
  }

DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)

#undef DEFINE_STUB_XCHG

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName)                  \
  template<>                                                               \
  template<typename T>                                                     \
  inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
                                                         T volatile* dest, \
                                                         T compare_value,  \
                                                         atomic_memory_order order) const { \
    STATIC_ASSERT(ByteSize == sizeof(T));                                  \
    return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
  }

DEFINE_STUB_CMPXCHG(1, int8_t,  os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

#undef DEFINE_STUB_CMPXCHG

#else // !AMD64

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                    // save the addend
    lock xadd dword ptr [edx], eax;  // eax = old *dest; *dest += addend
    add eax, ecx;                    // produce the new value in eax
  }
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}
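
// Note on the x86 sequences above: 'lock xadd' leaves the *old* value of
// *dest in eax, so adding the saved addend (ecx) back yields the
// fetch-and-add result; 'xchg' with a memory operand is implicitly locked.
// Both return their result in eax, which is why warning 4035 (missing
// return statement) is disabled above. A portable C++ sketch of the same
// add_and_fetch behavior (illustrative only; assumes <atomic>, which
// HotSpot does not use here):
//
//   #include <atomic>
//   #include <cstdint>
//
//   inline int32_t add_and_fetch(std::atomic<int32_t>* dest, int32_t add_value) {
//     // fetch_add returns the old value; add the addend back for the new one.
//     return dest->fetch_add(add_value) + add_value;
//   }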