src/share/vm/runtime/atomic.hpp

Old version:
 133 
 134 inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
 135   return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
 136 }
 137 
 138 inline void Atomic::inc(volatile size_t* dest) {
 139   inc_ptr((volatile intptr_t*) dest);
 140 }
 141 
 142 inline void Atomic::dec(volatile size_t* dest) {
 143   dec_ptr((volatile intptr_t*) dest);
 144 }
 145 
 146 #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 147 /*
 148  * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 149  * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
 150  * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 151  * implementation to be used instead.
 152  */
 153 inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand, cmpxchg_memory_order order)
 154 {
 155   assert(sizeof(jbyte) == 1, "assumption.");
 156   uintptr_t dest_addr = (uintptr_t)dest;
 157   uintptr_t offset = dest_addr % sizeof(jint);
 158   volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
 159   jint cur = *dest_int;
 160   jbyte* cur_as_bytes = (jbyte*)(&cur);
 161   jint new_val = cur;
 162   jbyte* new_val_as_bytes = (jbyte*)(&new_val);
 163   new_val_as_bytes[offset] = exchange_value;
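  // Note: if the byte at dest already differs from comparand, the loop body is
  // never entered, so no jint-sized cmpxchg (and hence no memory barrier) is
  // ever issued on this path.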
 164   while (cur_as_bytes[offset] == comparand) {
 165     jint res = cmpxchg(new_val, dest_int, cur, order);
 166     if (res == cur) break;
 167     cur = res;
 168     new_val = cur;
 169     new_val_as_bytes[offset] = exchange_value;
 170   }
 171   return cur_as_bytes[offset];
 172 }

 173 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 174 
 175 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
 176   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 177   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 178 }
 179 
 180 inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
 181                          volatile unsigned int* dest, unsigned int compare_value,
 182                          cmpxchg_memory_order order) {
 183   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 184   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
 185                                        (jint)compare_value, order);
 186 }
 187 
 188 inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
 189   jlong old = load(dest);
 190   jlong new_value = old + add_value;
 191   while (old != cmpxchg(new_value, dest, old)) {
 192     old = load(dest);

New version:

 133 
 134 inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
 135   return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
 136 }
 137 
 138 inline void Atomic::inc(volatile size_t* dest) {
 139   inc_ptr((volatile intptr_t*) dest);
 140 }
 141 
 142 inline void Atomic::dec(volatile size_t* dest) {
 143   dec_ptr((volatile intptr_t*) dest);
 144 }
 145 
 146 #ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 147 /*
 148  * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 149  * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
 150  * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 151  * implementation to be used instead.
 152  */
 153 inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, 
 154                              jbyte compare_value, cmpxchg_memory_order order) {
 155   STATIC_ASSERT(sizeof(jbyte) == 1);
 156   volatile jint* dest_int =
 157       static_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
 158   size_t offset = pointer_delta(dest, dest_int, 1);
 159   jint cur = *dest_int;
 160   jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);
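  // Illustrative example: if dest is 0x1003, dest_int is 0x1000 and offset is 3,
  // so cur_as_bytes[3] overlays exactly the byte of *dest_int that *dest names;
  // the other three bytes of cur are carried along unchanged into the swap below.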
 161 
 162   // current value may not be what we are looking for, so force it
 163   // to that value so the initial cmpxchg will fail if it is different
 164   cur_as_bytes[offset] = compare_value;
 165 
 166   // always execute a real cmpxchg so that we get the required memory
 167   // barriers even on initial failure
 168   do {
 169     // value to swap in matches current value ...
 170     jint new_value = cur;
 171     // ... except for the one jbyte we want to update
 172     reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;
 173 
 174     jint res = cmpxchg(new_value, dest_int, cur, order);
 175     if (res == cur) break; // success
 176 
 177     // at least one jbyte in the jint changed value, so update
 178     // our view of the current jint
 179     cur = res;
 180     // if our jbyte is still as cur we loop and try again
 181   } while (cur_as_bytes[offset] == compare_value);
 182 
 183   return cur_as_bytes[offset];
 184 } 
 185 
 186 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
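
// Illustrative sketch only, not part of this file or this change: a platform
// with a native byte-sized compare-and-swap opts out of the emulation above by
// defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE in its platform-specific atomic
// header and supplying its own inline definition, along these lines (the use
// of a GCC-style builtin here is an assumption, purely for illustration):
//
//   #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
//
//   inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
//                                jbyte compare_value, cmpxchg_memory_order order) {
//     // __sync_val_compare_and_swap returns the previous value of *dest and
//     // issues a full barrier, which satisfies the conservative default order.
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }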
 187 
 188 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
 189   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 190   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 191 }
 192 
 193 inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
 194                          volatile unsigned int* dest, unsigned int compare_value,
 195                          cmpxchg_memory_order order) {
 196   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 197   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
 198                                        (jint)compare_value, order);
 199 }
 200 
 201 inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
 202   jlong old = load(dest);
 203   jlong new_value = old + add_value;
 204   while (old != cmpxchg(new_value, dest, old)) {
 205     old = load(dest);

