/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
#define SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP

#include "runtime/atomic.hpp"

// Linux
#ifdef TARGET_OS_ARCH_linux_x86
# include "atomic_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "atomic_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "atomic_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "atomic_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_aarch64
# include "atomic_linux_aarch64.inline.hpp"
#endif

// Solaris
#ifdef TARGET_OS_ARCH_solaris_x86
# include "atomic_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "atomic_solaris_sparc.inline.hpp"
#endif

// Windows
#ifdef TARGET_OS_ARCH_windows_x86
# include "atomic_windows_x86.inline.hpp"
#endif

// AIX
#ifdef TARGET_OS_ARCH_aix_ppc
# include "atomic_aix_ppc.inline.hpp"
#endif

// BSD
#ifdef TARGET_OS_ARCH_bsd_x86
# include "atomic_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "atomic_bsd_zero.inline.hpp"
#endif

// size_t operations are implemented by casting to the pointer-width
// (intptr_t) variants below. This is only valid when size_t has the same
// width as a pointer, which the following check enforces at compile time.
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 * in terms of jint-sized cmpxchg. Platforms may override it by providing their own inline
 * definition and defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE, which causes the platform-specific
 * implementation to be used instead.
 */
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand, cmpxchg_memory_order order)
{
  assert(sizeof(jbyte) == 1, "assumption.");
  // Locate the aligned 32-bit word containing the destination byte, and the
  // byte's offset within that word.
  uintptr_t dest_addr = (uintptr_t)dest;
  uintptr_t offset = dest_addr % sizeof(jint);
  volatile jint* dest_int = (volatile jint*)(dest_addr - offset);
  // Snapshot the whole word and build the value it would hold if only the
  // target byte were replaced with exchange_value.
  jint cur = *dest_int;
  jbyte* cur_as_bytes = (jbyte*)(&cur);
  jint new_val = cur;
  jbyte* new_val_as_bytes = (jbyte*)(&new_val);
  new_val_as_bytes[offset] = exchange_value;
  // Retry the word-sized cmpxchg for as long as the target byte still matches
  // the comparand; a failure caused by a concurrent change to one of the
  // neighboring bytes simply refreshes the snapshot and tries again.
  while (cur_as_bytes[offset] == comparand) {
    jint res = cmpxchg(new_val, dest_int, cur, order);
    if (res == cur) break;
    cur = res;
    new_val = cur;
    new_val_as_bytes[offset] = exchange_value;
  }
  // As with the other cmpxchg flavors, return the value the destination byte
  // held before the (attempted) exchange.
  return cur_as_bytes[offset];
}
#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
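
// For illustration only: callers do not need to know whether the platform
// supplies a specialized byte cmpxchg or falls back to the emulation above.
// A byte-sized flag might be claimed like this (names are hypothetical and
// not part of this file):
//
//   volatile jbyte _claimed = 0;
//   if (Atomic::cmpxchg((jbyte)1, &_claimed, (jbyte)0) == 0) {
//     // this thread observed 0 and installed 1, so it owns the claim
//   }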

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
                         volatile unsigned int* dest, unsigned int compare_value,
                         cmpxchg_memory_order order) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                       (jint)compare_value, order);
}

// Default jlong add: a compare-and-swap retry loop. Reload the current value
// and retry until the cmpxchg succeeds, i.e. until no other thread modified
// the destination between the load and the exchange. Like the other add()
// overloads, this returns the updated value.
inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
  jlong old = load(dest);
  jlong new_value = old + add_value;
  while (old != cmpxchg(new_value, dest, old)) {
    old = load(dest);
    new_value = old + add_value;
  }
  return old + add_value;
}

inline void Atomic::inc(volatile short* dest) {
  // Most platforms do not support atomic increment on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of 0x10000 to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro to get the desired alignment.
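  //
  // For illustration only (the macro is defined elsewhere and the field names
  // here are hypothetical): a typical declaration pairs the atomically updated
  // short with an adjacent plain short so that together they fill one aligned
  // 32-bit word, e.g.
  //
  //   ATOMIC_SHORT_PAIR(
  //     volatile short _counter,   // updated via Atomic::inc/dec
  //     unsigned short _length     // never updated atomically
  //   );
  //
  // The macro emits the two declarations in the endianness-dependent order
  // that places the atomic short in the half of the word expected below.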
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  (void)Atomic::add(0x10000, (volatile int*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  (void)Atomic::add(0x10000, (volatile int*)(dest));
#endif
}

inline void Atomic::dec(volatile short* dest) {
  // See the comment in Atomic::inc(volatile short*) above; the same alignment
  // trick applies, using an atomic add of -0x10000.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  (void)Atomic::add(-0x10000, (volatile int*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  (void)Atomic::add(-0x10000, (volatile int*)(dest));
#endif
}

#endif // SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP