
src/share/vm/runtime/atomic.cpp

rev 10933 : 8154736: enhancement of cmpxchg and copy_to_survivor for ppc64
Reviewed-by:
Contributed-by: HORII@jp.ibm.com, mdoerr


  40   jbyte* cur_as_bytes = (jbyte*)(&cur);
  41   jint new_val = cur;
  42   jbyte* new_val_as_bytes = (jbyte*)(&new_val);
  43   new_val_as_bytes[offset] = exchange_value;
  44   while (cur_as_bytes[offset] == compare_value) {
  45     jint res = cmpxchg(new_val, dest_int, cur);
  46     if (res == cur) break;
  47     cur = res;
  48     new_val = cur;
  49     new_val_as_bytes[offset] = exchange_value;
  50   }
  51   return cur_as_bytes[offset];
  52 }
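The loop above retries a word-wide CAS until the byte of interest either no longer matches compare_value or the swap succeeds. A minimal standalone sketch of that byte-in-word technique follows; the word-wide primitive is stood in by a GCC/Clang builtin and every helper name is assumed, since the enclosing HotSpot function and its setup lines are not part of this excerpt.

#include <stdint.h>

// Sketch only: cmpxchg_word stands in for the word-wide Atomic::cmpxchg.
static inline int32_t cmpxchg_word(int32_t exchange_value,
                                   volatile int32_t* dest,
                                   int32_t compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

// Byte-wide CAS emulated on the aligned 32-bit word that contains *dest.
int8_t cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
  uintptr_t dest_addr = (uintptr_t)dest;
  uintptr_t offset    = dest_addr % sizeof(int32_t);  // byte index within the word
  volatile int32_t* dest_int = (volatile int32_t*)(dest_addr - offset);

  int32_t cur = *dest_int;
  int8_t* cur_as_bytes = (int8_t*)&cur;
  int32_t new_val = cur;
  int8_t* new_val_as_bytes = (int8_t*)&new_val;
  new_val_as_bytes[offset] = exchange_value;

  // Retry while the target byte still matches but a neighboring byte moved
  // underneath us; stop once the CAS succeeds or the byte no longer matches.
  while (cur_as_bytes[offset] == compare_value) {
    int32_t res = cmpxchg_word(new_val, dest_int, cur);
    if (res == cur) break;
    cur = res;
    new_val = cur;
    new_val_as_bytes[offset] = exchange_value;
  }
  return cur_as_bytes[offset];
}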
  53 
  54 unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  55   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  56   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
  57 }
  58 
  59 unsigned Atomic::cmpxchg(unsigned int exchange_value,
  60                          volatile unsigned int* dest, unsigned int compare_value) {
  61   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  62   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
  63                                        (jint)compare_value);
  64 }
  65 
  66 jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
  67   jlong old = load(dest);
  68   jlong new_value = old + add_value;
  69   while (old != cmpxchg(new_value, dest, old)) {
  70     old = load(dest);
  71     new_value = old + add_value;
  72   }
  73   return old;
  74 }
  75 
  76 void Atomic::inc(volatile short* dest) {
  77   // Most platforms do not support atomic increment on a 2-byte value. However,
  78   // if the value occupies the most significant 16 bits of an aligned 32-bit
  79   // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
  80   //
  81   // The least significant parts of this 32-bit word will never be affected, even
  82   // in case of overflow/underflow.
  83   //


  40   jbyte* cur_as_bytes = (jbyte*)(&cur);
  41   jint new_val = cur;
  42   jbyte* new_val_as_bytes = (jbyte*)(&new_val);
  43   new_val_as_bytes[offset] = exchange_value;
  44   while (cur_as_bytes[offset] == compare_value) {
  45     jint res = cmpxchg(new_val, dest_int, cur);
  46     if (res == cur) break;
  47     cur = res;
  48     new_val = cur;
  49     new_val_as_bytes[offset] = exchange_value;
  50   }
  51   return cur_as_bytes[offset];
  52 }
  53 
  54 unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  55   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  56   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
  57 }
  58 
  59 unsigned Atomic::cmpxchg(unsigned int exchange_value,
  60                            volatile unsigned int* dest, unsigned int compare_value,
  61                            memory_order order) {
  62   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  63   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
  64                                        (jint)compare_value, order);
  65 }
  66 
  67 jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
  68   jlong old = load(dest);
  69   jlong new_value = old + add_value;
  70   while (old != cmpxchg(new_value, dest, old)) {
  71     old = load(dest);
  72     new_value = old + add_value;
  73   }
  74   return old;
  75 }
  76 
  77 void Atomic::inc(volatile short* dest) {
  78   // Most platforms do not support atomic increment on a 2-byte value. However,
  79   // if the value occupies the most significant 16 bits of an aligned 32-bit
  80   // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
  81   //
  82   // The least significant parts of this 32-bit word will never be affected, even
  83   // in case of overflow/underflow.
  84   //