
src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp

rev 47321 : [mq]: Atomic_loadstore

@@ -40,37 +40,19 @@
 // necessary (and expensive). We should generate separate cases if
 // this becomes a performance problem.
 
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
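
Why the pragma: with 32-bit MSVC inline assembly, a function may leave its
result in EAX and fall off the end without a C++ return statement, which is
exactly what triggers warning C4035. A minimal illustration (not part of
this patch):

inline int asm_load_sketch(volatile int* src) {
  __asm {
    mov eax, src        // load the pointer argument
    mov eax, [eax]      // dereference; EAX doubles as the x86 return register
  }
  // No C++ return statement: without the pragma, MSVC warns C4035 here.
}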
 
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-
-
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
   D add_and_fetch(I add_value, D volatile* dest) const;
 };
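
The struct above plugs into the curiously-recurring-template base
Atomic::AddAndFetch, which presumably supplies operator() and forwards to
the platform's add_and_fetch. A minimal sketch of that shape (names
hypothetical except add_and_fetch):

template<typename Derived>
struct AddAndFetchSketch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const {
    // Downcast to the concrete platform type and call its primitive.
    return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
  }
};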
 
 #ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
   return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 }
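
The 4-byte specialization funnels everything through the assembly stub
os::atomic_add_func via add_using_helper<jint>. The helper's job is type
adaptation; a sketch of the assumed shape (not copied from atomic.hpp):

template<typename StubType, typename Fn, typename I, typename D>
inline D add_using_helper_sketch(Fn stub, I add_value, D volatile* dest) {
  // Convert the operands to the stub's type, call it, convert back.
  return static_cast<D>(stub(static_cast<StubType>(add_value),
                             reinterpret_cast<StubType volatile*>(dest)));
}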

@@ -110,12 +92,10 @@
 DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
 DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 
 #undef DEFINE_STUB_CMPXCHG
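
For readers without the macro body (it sits above this hunk), the 4-byte
instantiation plausibly expands along these lines, reconstructed from the
call site and hedged accordingly:

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return cmpxchg_using_helper<jint>(os::atomic_cmpxchg_func,
                                    exchange_value, dest, compare_value);
}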
 
-inline jlong Atomic::load(const volatile jlong* src) { return *src; }
-
 #else // !AMD64
 
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {

@@ -198,36 +178,39 @@
     pop edi
     pop ebx
   }
 }
 
-inline jlong Atomic::load(const volatile jlong* src) {
-  volatile jlong dest;
-  volatile jlong* pdest = &dest;
+template<>
+template<typename T>
+inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  volatile T dest;
+  volatile T* pdest = &dest;
   __asm {
     mov eax, src
     fild     qword ptr [eax]
     mov eax, pdest
     fistp    qword ptr [eax]
   }
   return dest;
 }
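
The fild/fistp pair is what makes this one atomic 8-byte memory access on
32-bit x86: there is no plain 64-bit mov, but an aligned x87 load/store
moves all 64 bits at once, and the round trip is bit-exact because the
extended format carries a 64-bit significand. A standalone check of that
property (a hypothetical test, assuming a compiler whose long double is the
80-bit x87 format, e.g. 32-bit GCC; it does not hold for MSVC, whose long
double is a 64-bit double):

#include <cassert>
#include <cstdint>

int main() {
  int64_t v = INT64_C(0x7fffffffffffff01);      // not representable in double
  long double d = static_cast<long double>(v);  // analogous to fild
  int64_t back = static_cast<int64_t>(d);       // analogous to fistp
  assert(back == v);                            // exact round trip
  return 0;
}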
 
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  volatile jlong* src = &store_value;
+template<>
+template<typename T>
+inline void Atomic::PlatformStore<8>::operator()(T store_value,
+                                                 T volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  volatile T* src = &store_value;
   __asm {
     mov eax, src
     fild     qword ptr [eax]
     mov eax, dest
     fistp    qword ptr [eax]
   }
 }
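
For contrast, the same 64-bit load and store on 32-bit Windows can also be
synthesized from lock cmpxchg8b through the InterlockedCompareExchange64
API; the fild/fistp versions above avoid the locked instruction. A sketch
of that alternative (not what this patch does):

#include <windows.h>

inline __int64 load64_cas(volatile __int64* src) {
  // A CAS whose compare and exchange values are equal reads *src
  // atomically without ever changing it.
  return InterlockedCompareExchange64(src, 0, 0);
}

inline void store64_cas(volatile __int64* dest, __int64 value) {
  __int64 old = *dest;        // plain read; only a hint for the first CAS
  for (;;) {
    __int64 prev = InterlockedCompareExchange64(dest, value, old);
    if (prev == old) return;  // CAS installed value
    old = prev;               // retry with the freshly observed contents
  }
}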
 
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic::store(store_value, (volatile jlong*)dest);
-}
-
 #endif // AMD64
 
 #pragma warning(default: 4035) // Enables warnings reporting missing return statement
 
 #endif // OS_CPU_WINDOWS_X86_VM_ATOMIC_WINDOWS_X86_HPP