src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp

rev 13491 : imported patch add_windows_x86


  40 // necessary (and expensive). We should generate separate cases if
  41 // this becomes a performance problem.
  42 
  43 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
  44 
  45 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
  46 inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
  47 inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
  48 
  49 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
  50 inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
  51 
  52 inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
  53 inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
  54 inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
  55 
  56 
  57 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
  58 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
  59 
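These plain assignments are sufficient because x86 guarantees that naturally aligned loads and stores up to the native word size (32 bits here, 64 bits under AMD64) are atomic. Note that the jlong overloads of store appear only in the AMD64 branch below: a 64-bit store on 32-bit x86 is not a single atomic instruction and takes a different path that is not shown in this excerpt.
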
  60 #ifdef AMD64
  61 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
  62 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
  63 
  64 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
  65   return (jint)(*os::atomic_add_func)(add_value, dest);
  66 }
  67 
  68 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  69   return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
  70 }
  71 
  72 inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
  73   return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
  74 }
  75 
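For reference, os::atomic_add_func and os::atomic_add_ptr_func are function pointers to stub routines installed at startup. Their declarations live in os_windows_x86.hpp; the sketch below shows the signature shapes implied by the call sites above, and is an assumption rather than a quotation of that header.

  // Sketch only -- see os_windows_x86.hpp for the authoritative declarations.
  static jint     (*atomic_add_func)     (jint add_value, volatile jint* dest);
  static intptr_t (*atomic_add_ptr_func) (intptr_t add_value, volatile intptr_t* dest);
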
  76 inline void Atomic::inc    (volatile jint*     dest) {
  77   (void)add    (1, dest);
  78 }
  79 
  80 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  81   (void)add_ptr(1, dest);
  82 }
  83 
  84 inline void Atomic::inc_ptr(volatile void*     dest) {
  85   (void)add_ptr(1, dest);
  86 }
  87 
  88 inline void Atomic::dec    (volatile jint*     dest) {
  89   (void)add    (-1, dest);
  90 }
  91 
  92 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  93   (void)add_ptr(-1, dest);

 113   template<>                                                            \
 114   template<typename T>                                                  \
 115   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
 116                                                          T volatile* dest, \
 117                                                          T compare_value, \
 118                                                          cmpxchg_memory_order order) const { \
 119     STATIC_ASSERT(ByteSize == sizeof(T));                               \
 120     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
 121   }
 122 
 123 DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
 124 DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
 125 DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 126 
 127 #undef DEFINE_STUB_CMPXCHG
 128 
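Expanding DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func) by hand, using the macro body shown above, yields a specialization along these lines:

  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
                                                  cmpxchg_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
    return cmpxchg_using_helper<jint>(os::atomic_cmpxchg_func, exchange_value, dest, compare_value);
  }
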
 129 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 130 
 131 #else // !AMD64
 132 
 133 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
 134   __asm {
 135     mov edx, dest;
 136     mov eax, add_value;
 137     mov ecx, eax;
 138     lock xadd dword ptr [edx], eax;
 139     add eax, ecx;
 140   }
 141 }
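
Note that this function falls off the end without a C++ return statement: under the MSVC __asm convention a 32-bit integer result is returned in EAX, which here holds the updated value after the xadd/add sequence. This is exactly the pattern that the #pragma warning(disable: 4035) at line 43 exists to silence.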
 142 
 143 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 144   return (intptr_t)add((jint)add_value, (volatile jint*)dest);
 145 }
 146 
 147 inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
 148   return (void*)add((jint)add_value, (volatile jint*)dest);
 149 }
 150 
 151 inline void Atomic::inc    (volatile jint*     dest) {
 152   // alternative for InterlockedIncrement
 153   __asm {
 154     mov edx, dest;
 155     lock add dword ptr [edx], 1;
 156   }
 157 }
 158 
 159 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 160   inc((volatile jint*)dest);
 161 }
 162 
 163 inline void Atomic::inc_ptr(volatile void*     dest) {
 164   inc((volatile jint*)dest);
 165 }
 166 
 167 inline void Atomic::dec    (volatile jint*     dest) {
 168   // alternative for InterlockedDecrement

(End of the pre-patch listing. The listing below shows the same file after the add_windows_x86 patch, with Atomic::add routed through the new PlatformAdd template.)

  40 // necessary (and expensive). We should generate separate cases if
  41 // this becomes a performance problem.
  42 
  43 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
  44 
  45 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
  46 inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
  47 inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
  48 
  49 inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
  50 inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
  51 
  52 inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
  53 inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
  54 inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
  55 
  56 
  57 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
  58 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
  59 
  60 template<size_t byte_size>
  61 struct Atomic::PlatformAdd
  62   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  63 {
  64   template<typename I, typename D>
  65   D add_and_fetch(I add_value, D volatile* dest) const;
  66 };
  67 
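The PlatformAdd specializations that follow plug into the shared Atomic front end via CRTP: the generic Atomic::add selects PlatformAdd<sizeof(D)> and calls it through the AddAndFetch base class, whose operator() forwards to add_and_fetch(). A minimal, self-contained model of that forwarding (an illustration, not the actual shared atomic.hpp code, which also handles pointer scaling) might look like:

  // Hypothetical model of the CRTP forwarding used by AddAndFetch.
  template<typename Derived>
  struct AddAndFetchModel {
    template<typename I, typename D>
    D operator()(I add_value, D volatile* dest) const {
      // The platform class supplies add_and_fetch(); the base only forwards,
      // because the underlying primitive already returns the updated value.
      return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
    }
  };
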
  68 #ifdef AMD64
  69 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
  70 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
  71 
  72 template<>
  73 template<typename I, typename D>
  74 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  75   return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
  76 }
  77 
  78 template<>
  79 template<typename I, typename D>
  80 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  81   return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
  82 }
  83 
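add_using_helper<jint>(...) and add_using_helper<intptr_t>(...) bridge between the templated operand types and the fixed stub signatures. As an assumption about the shared helper (check the shared atomic.hpp for the real definition), it essentially casts the operands to the stub's types, calls the stub, and casts the result back, roughly like the illustrative sketch below:

  // Rough sketch of the helper's behaviour; the name and casts are illustrative.
  template<typename StubType, typename StubFn, typename I, typename D>
  inline D add_using_helper_sketch(StubFn stub, I add_value, D volatile* dest) {
    return static_cast<D>(
      stub(static_cast<StubType>(add_value),
           reinterpret_cast<StubType volatile*>(dest)));
  }
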
  84 inline void Atomic::inc    (volatile jint*     dest) {
  85   (void)add    (1, dest);
  86 }
  87 
  88 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  89   (void)add_ptr(1, dest);
  90 }
  91 
  92 inline void Atomic::inc_ptr(volatile void*     dest) {
  93   (void)add_ptr(1, dest);
  94 }
  95 
  96 inline void Atomic::dec    (volatile jint*     dest) {
  97   (void)add    (-1, dest);
  98 }
  99 
 100 inline void Atomic::dec_ptr(volatile intptr_t* dest) {
 101   (void)add_ptr(-1, dest);

 121   template<>                                                            \
 122   template<typename T>                                                  \
 123   inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
 124                                                          T volatile* dest, \
 125                                                          T compare_value, \
 126                                                          cmpxchg_memory_order order) const { \
 127     STATIC_ASSERT(ByteSize == sizeof(T));                               \
 128     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
 129   }
 130 
 131 DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
 132 DEFINE_STUB_CMPXCHG(4, jint,  os::atomic_cmpxchg_func)
 133 DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
 134 
 135 #undef DEFINE_STUB_CMPXCHG
 136 
 137 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 138 
 139 #else // !AMD64
 140 
 141 template<>
 142 template<typename I, typename D>
 143 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
 144   STATIC_ASSERT(4 == sizeof(I));
 145   STATIC_ASSERT(4 == sizeof(D));
 146   __asm {
 147     mov edx, dest;
 148     mov eax, add_value;
 149     mov ecx, eax;
 150     lock xadd dword ptr [edx], eax;
 151     add eax, ecx;
 152   }
 153 }
 154 
 155 inline void Atomic::inc    (volatile jint*     dest) {
 156   // alternative for InterlockedIncrement
 157   __asm {
 158     mov edx, dest;
 159     lock add dword ptr [edx], 1;
 160   }
 161 }
 162 
 163 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
 164   inc((volatile jint*)dest);
 165 }
 166 
 167 inline void Atomic::inc_ptr(volatile void*     dest) {
 168   inc((volatile jint*)dest);
 169 }
 170 
 171 inline void Atomic::dec    (volatile jint*     dest) {
 172   // alternative for InterlockedDecrement
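
For context, typical call sites elsewhere in HotSpot use this API roughly as follows (illustrative only; the names are made up):

  volatile jint _refcount = 0;

  void example() {
    Atomic::inc(&_refcount);                    // lock add [..], 1
    jint updated  = Atomic::add(5, &_refcount); // returns the new value
    jint previous = Atomic::cmpxchg((jint)0, &_refcount, updated); // returns the value observed before the exchange
    Atomic::dec(&_refcount);
  }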

