/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP

#include "runtime/os.hpp"

// Implementation of class Atomic

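// 32-bit atomic add. The lock-prefixed xaddl leaves the previous value of
// *dest in the register operand, so the updated value expected by callers
// is reconstructed as that old value plus add_value.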
template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  int32_t addend = add_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}

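// Atomic increment. A lock-prefixed add of an immediate is cheaper than
// xadd when the caller does not need the resulting value.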
template <>
inline void Atomic::specialized_inc<int32_t>(volatile int32_t* dest) {
  __asm__ volatile (  "lock addl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}

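// Atomic decrement; the mirror image of specialized_inc above.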
template <>
inline void Atomic::specialized_dec<int32_t>(volatile int32_t* dest) {
  __asm__ volatile (  "lock subl $1,(%0)" :
                    : "r" (dest) : "cc", "memory");
}

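// Atomic exchange. xchg with a memory operand is implicitly locked, so no
// lock prefix is needed; it also acts as a full memory barrier.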
template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

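// Advertise to shared code that a byte-wide cmpxchg exists natively, so it
// does not have to be emulated with a word-sized one. The "q" constraint
// keeps the new value in a byte-addressable register.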
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

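// cmpxchg compares the "a" register (EAX here) with *dest; whether or not
// the exchange happens, that register ends up holding the value observed
// at dest, which is exactly what this function must return. The order
// argument is ignored: a lock-prefixed instruction is already a full fence.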
template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile ("lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64

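// On AMD64 the 64-bit operations mirror the 32-bit ones above, using the
// q-suffixed (quadword) forms of the same instructions.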
template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  int64_t addend = add_value;
  __asm__ __volatile__ ("lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}

template <>
inline void Atomic::specialized_inc<int64_t>(volatile int64_t* dest) {
  __asm__ __volatile__ ("lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline void Atomic::specialized_dec<int64_t>(volatile int64_t* dest) {
  __asm__ __volatile__ ("lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  __asm__ __volatile__ ("xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  __asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

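// Without AMD64, 64-bit accesses made through ordinary 32-bit moves are not
// atomic, so int64_t loads, stores and cmpxchg are routed through assembly
// stubs; cmpxchg8b is the only 64-bit compare-and-exchange available on IA-32.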
extern "C" {
  // defined in linux_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

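// A plain pair of 32-bit stores could be observed half-written by another
// thread; the stub performs the store as a single 64-bit access.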
template <>
inline void Atomic::specialized_store<int64_t>(int64_t store_value, volatile int64_t* dest) {
  _Atomic_move_long(&store_value, dest);
}

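// The corresponding atomic 64-bit load: the stub copies *src into a local
// temporary in one 64-bit access, and the temporary is returned by value.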
template <>
inline int64_t Atomic::specialized_load<int64_t>(const volatile int64_t* src) {
  volatile int64_t dest;
  _Atomic_move_long(src, &dest);
  return dest;
}

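// 64-bit compare-and-exchange via the cmpxchg8b-based stub. As on AMD64,
// the order argument is ignored because the locked instruction already
// provides full ordering.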
template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
}

#endif // AMD64

#endif // OS_CPU_LINUX_X86_VM_ATOMIC_LINUX_X86_HPP