src/share/vm/runtime/atomic.hpp


old src/share/vm/runtime/atomic.hpp

   1 /*
   2  * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  64   inline static void store_ptr(void*    store_value, void*     dest);
  65 
  66   inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  67   inline static void store    (jshort   store_value, volatile jshort*   dest);
  68   inline static void store    (jint     store_value, volatile jint*     dest);
  69   // See comment above about using jlong atomics on 32-bit platforms
  70   inline static void store    (jlong    store_value, volatile jlong*    dest);
  71   inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  72   inline static void store_ptr(void*    store_value, volatile void*     dest);
  73 
  74   // See comment above about using jlong atomics on 32-bit platforms
  75   inline static jlong load(volatile jlong* src);
  76 
  77   // Atomically add to a location. Returns updated value. add*() provide:
  78   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  79   inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
  80   inline static jint     add    (jint     add_value, volatile jint*     dest);
  81   inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  82   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  83   inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
  84   // See comment above about using jlong atomics on 32-bit platforms
  85   inline static jlong    add    (jlong    add_value, volatile jlong*    dest);
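
Since add() returns the updated value and is fully fenced, a call site can bump and read a shared counter in one step. A minimal sketch of such a use (the counter and helper are illustrative, not part of this file):

    // Illustrative call site: _allocated is an aligned counter shared by threads.
    static volatile size_t _allocated = 0;

    static size_t record_allocation(size_t bytes) {
      // Returns the counter's updated value, per the add*() contract above.
      return Atomic::add(bytes, &_allocated);
    }
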
  86 
  87   // Atomically increment a location. inc*() provide:
  88   // <fence> increment-dest <membar StoreLoad|StoreStore>
  89   inline static void inc    (volatile jint*     dest);
  90   inline static void inc    (volatile jshort*   dest);
  91   inline static void inc    (volatile size_t*   dest);
  92   inline static void inc_ptr(volatile intptr_t* dest);
  93   inline static void inc_ptr(volatile void*     dest);
  94 
  95   // Atomically decrement a location. dec*() provide:
  96   // <fence> decrement-dest <membar StoreLoad|StoreStore>
  97   inline static void dec    (volatile jint*     dest);
  98   inline static void dec    (volatile jshort*   dest);
  99   inline static void dec    (volatile size_t*   dest);
 100   inline static void dec_ptr(volatile intptr_t* dest);
 101   inline static void dec_ptr(volatile void*     dest);
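
inc() and dec() return void, so they fit counters whose new value is not needed at the call site; when the decremented value must be observed (for example, to free on reaching zero), add(-1, ...) is the usual idiom. A hedged sketch (the helper names are illustrative):

    // Illustrative reference-count helpers; refcount is an aligned volatile jint.
    static void retain(volatile jint* refcount) {
      Atomic::inc(refcount);                  // new value not needed: inc() suffices
    }
    static bool release(volatile jint* refcount) {
      // add() returns the updated value, so the 1 -> 0 edge is observable here.
      return Atomic::add(-1, refcount) == 0;
    }
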
 102 
 103   // Performs atomic exchange of *dest with exchange_value. Returns the
 104   // prior value of *dest. xchg*() provide:
 105   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
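
Because xchg() returns the prior value and is fully fenced, it is the classic building block for a test-and-set spinlock. A minimal sketch under those assumptions (the SpinLock class is illustrative, not a HotSpot type):

    // Illustrative test-and-set spinlock built on Atomic::xchg.
    class SpinLock {
      volatile jint _locked;               // 0 = free, 1 = held
     public:
      SpinLock() : _locked(0) {}
      void lock() {
        // xchg returns the prior value; 0 means this thread took the lock.
        while (Atomic::xchg(1, &_locked) != 0) {
          // spin until the holder releases
        }
      }
      void unlock() {
        Atomic::xchg(0, &_locked);         // fenced store back to 'free'
      }
    };
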


 180     cur = res;
 181     // retry while our byte still holds compare_value (the word-wide cmpxchg failed on a neighboring byte)
 182   } while (cur_as_bytes[offset] == compare_value);
 183 
 184   return cur_as_bytes[offset];
 185 }
 186 
 187 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
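
The loop tail above belongs to the generic jbyte cmpxchg fallback: on platforms without a byte-wide cmpxchg, the byte is spliced into its enclosing aligned 32-bit word and a word-wide cmpxchg is retried until it either succeeds or the target byte no longer holds compare_value. A self-contained sketch of the same technique, assuming a little-endian target and the GCC/Clang __sync_val_compare_and_swap builtin (illustrative, not the HotSpot code itself):

    #include <stdint.h>

    int8_t cmpxchg_byte_via_word(int8_t exchange_value, volatile int8_t* dest,
                                 int8_t compare_value) {
      // Locate the aligned 32-bit word holding *dest and the byte's bit offset.
      volatile uint32_t* word = (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
      unsigned shift = 8u * (unsigned)((uintptr_t)dest & 3);  // little-endian offset
      uint32_t mask = 0xFFu << shift;
      for (;;) {
        uint32_t cur = *word;
        int8_t cur_byte = (int8_t)((cur >> shift) & 0xFFu);
        if (cur_byte != compare_value) {
          return cur_byte;                 // byte already differs: report it
        }
        uint32_t updated = (cur & ~mask) | (((uint32_t)(uint8_t)exchange_value) << shift);
        if (__sync_val_compare_and_swap(word, cur, updated) == cur) {
          return compare_value;            // word CAS succeeded: byte was swapped
        }
        // Word CAS failed: a neighboring byte changed concurrently; retry.
      }
    }
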
 188 
 189 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
 190   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 191   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 192 }
 193 
 194 inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
 195                          volatile unsigned int* dest, unsigned int compare_value,
 196                          cmpxchg_memory_order order) {
 197   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 198   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
 199                                        (jint)compare_value, order);
 200 }
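
cmpxchg() returns the prior value of *dest, so a caller can both attempt a transition and learn whether it won. A typical one-shot claim looks like this (illustrative, assuming a volatile jint state word with 0 = unclaimed, 1 = claimed):

    static bool try_claim(volatile jint* state) {
      // Returns true only for the thread that performed the 0 -> 1 transition.
      return Atomic::cmpxchg(1, state, 0) == 0;
    }
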
 201 
 202 inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
 203   jlong old = load(dest);
 204   jlong new_value = old + add_value;
 205   while (old != cmpxchg(new_value, dest, old)) {
 206     old = load(dest);
 207     new_value = old + add_value;
 208   }
 209   return old;
 210 }
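
Note that this overload returns old, the prior value, even though the add*() contract above promises the updated value. A corrected sketch of the same cmpxchg loop, returning the updated value (illustrative; shown only to contrast with the code above):

    inline jlong add_returning_updated(jlong add_value, volatile jlong* dest) {
      jlong old;
      jlong new_value;
      do {
        old = Atomic::load(dest);          // re-read the current value on each retry
        new_value = old + add_value;
      } while (old != Atomic::cmpxchg(new_value, dest, old));
      return new_value;                    // updated value, per the contract
    }
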
 211 
 212 inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
 213   // Most platforms do not support atomic add on a 2-byte value. However,
 214   // if the value occupies the most significant 16 bits of an aligned 32-bit
 215   // word, then we can do this with an atomic add of (add_value << 16)
 216   // to the 32-bit word.
 217   //
 218   // The least significant parts of this 32-bit word will never be affected, even
 219   // in case of overflow/underflow.
 220   //
 221   // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 222 #ifdef VM_LITTLE_ENDIAN
 223   assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
 224   jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
 225 #else
 226   assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
 227   jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
 228 #endif
 229   return (jshort)(new_value >> 16); // preserves sign
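
The ATOMIC_SHORT_PAIR macro arranges two jshort fields so that the atomically-updated one occupies the most significant half of an aligned 32-bit word on either endianness, which is exactly the alignment the asserts above check. A sketch of the intended declaration shape (class and field names are illustrative; see macros.hpp for the macro itself):

    class RefCountedThing {
      ATOMIC_SHORT_PAIR(
        volatile jshort _refcount,   // updated via Atomic::add(jshort, ...)
        jshort          _length     // plain field, never updated atomically
      );
    };
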

new src/share/vm/runtime/atomic.hpp

   1 /*
   2  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  64   inline static void store_ptr(void*    store_value, void*     dest);
  65 
  66   inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  67   inline static void store    (jshort   store_value, volatile jshort*   dest);
  68   inline static void store    (jint     store_value, volatile jint*     dest);
  69   // See comment above about using jlong atomics on 32-bit platforms
  70   inline static void store    (jlong    store_value, volatile jlong*    dest);
  71   inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  72   inline static void store_ptr(void*    store_value, volatile void*     dest);
  73 
  74   // See comment above about using jlong atomics on 32-bit platforms
  75   inline static jlong load(volatile jlong* src);
  76 
  77   // Atomically add to a location. Returns updated value. add*() provide:
  78   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  79   inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
  80   inline static jint     add    (jint     add_value, volatile jint*     dest);
  81   inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  82   inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  83   inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);


  84 
  85   // Atomically increment a location. inc*() provide:
  86   // <fence> increment-dest <membar StoreLoad|StoreStore>
  87   inline static void inc    (volatile jint*     dest);
  88   inline static void inc    (volatile jshort*   dest);
  89   inline static void inc    (volatile size_t*   dest);
  90   inline static void inc_ptr(volatile intptr_t* dest);
  91   inline static void inc_ptr(volatile void*     dest);
  92 
  93   // Atomically decrement a location. dec*() provide:
  94   // <fence> decrement-dest <membar StoreLoad|StoreStore>
  95   inline static void dec    (volatile jint*     dest);
  96   inline static void dec    (volatile jshort*   dest);
  97   inline static void dec    (volatile size_t*   dest);
  98   inline static void dec_ptr(volatile intptr_t* dest);
  99   inline static void dec_ptr(volatile void*     dest);
 100 
 101   // Performs atomic exchange of *dest with exchange_value. Returns the
 102   // prior value of *dest. xchg*() provide:
 103   // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>


 178     cur = res;
 179     // retry while our byte still holds compare_value (the word-wide cmpxchg failed on a neighboring byte)
 180   } while (cur_as_bytes[offset] == compare_value);
 181 
 182   return cur_as_bytes[offset];
 183 }
 184 
 185 #endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
 186 
 187 inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
 188   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 189   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 190 }
 191 
 192 inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
 193                          volatile unsigned int* dest, unsigned int compare_value,
 194                          cmpxchg_memory_order order) {
 195   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
 196   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
 197                                        (jint)compare_value, order);
 198 }
 199 
 200 inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
 201   // Most platforms do not support atomic add on a 2-byte value. However,
 202   // if the value occupies the most significant 16 bits of an aligned 32-bit
 203   // word, then we can do this with an atomic add of (add_value << 16)
 204   // to the 32-bit word.
 205   //
 206   // The least significant parts of this 32-bit word will never be affected, even
 207   // in case of overflow/underflow.
 208   //
 209   // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 210 #ifdef VM_LITTLE_ENDIAN
 211   assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
 212   jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
 213 #else
 214   assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
 215   jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
 216 #endif
 217   return (jshort)(new_value >> 16); // preserves sign