1 /*
   2  * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP
  26 #define OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP
  27 
  28 // Implementation of class atomic
  29 
  30 template<size_t byte_size>
  31 struct Atomic::PlatformAdd {
  32   template<typename D, typename I>
  33   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
  34 
  35   template<typename D, typename I>
  36   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
  37     return add_and_fetch(dest, add_value, order) - add_value;
  38   }
  39 };
  40 
// 32-bit atomic add-and-fetch, implemented as a CAS retry loop.
// Operands: %0 = rv (result), %1 = add_value, %2 = dest; %o2/%o3 scratch.
// NOTE(review): the `order` argument is not consulted here -- presumably
// the "memory" clobber plus SPARC's memory model makes that acceptable;
// confirm against the platform's ordering assumptions.
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ld     [%2], %%o2\n\t"        // o2 = current value at *dest
    " add    %1, %%o2, %%o3\n\t"    // o3 = o2 + add_value (proposed new value)
    " cas    [%2], %%o2, %%o3\n\t"  // if *dest == o2: store o3; o3 = old *dest
    " cmp    %%o2, %%o3\n\t"        // cas succeeded iff o3 still equals o2
    " bne    1b\n\t"                // lost the race: reload and retry
    "  nop\n\t"                     // branch delay slot
    " add    %1, %%o2, %0\n\t"      // rv = old value + add_value (the new value)
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
  63 
// 64-bit atomic add-and-fetch: same CAS retry loop as the 4-byte form,
// but using ldx/casx and branching on the 64-bit condition codes (%xcc).
// Operands: %0 = rv (result), %1 = add_value, %2 = dest; %o2/%o3 scratch.
// NOTE(review): the `order` argument is not consulted here -- confirm the
// "memory" clobber provides the required ordering on this platform.
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ldx    [%2], %%o2\n\t"         // o2 = current 64-bit value at *dest
    " add    %1, %%o2, %%o3\n\t"     // o3 = o2 + add_value (proposed new value)
    " casx   [%2], %%o2, %%o3\n\t"   // if *dest == o2: store o3; o3 = old *dest
    " cmp    %%o2, %%o3\n\t"         // casx succeeded iff o3 still equals o2
    " bne    %%xcc, 1b\n\t"          // lost the race: reload and retry
    "  nop\n\t"                      // branch delay slot
    " add    %1, %%o2, %0\n\t"       // rv = old value + add_value (the new value)
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
  86 
// 32-bit atomic exchange using the SWAP instruction, which atomically
// exchanges a register with the 32-bit word at [dest].
// The "0" constraint ties exchange_value to the same register as rv, so
// SWAP writes exchange_value to memory and leaves the previous memory
// contents in that register, which is returned.
// NOTE(review): the `order` argument is not consulted here.
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    " swap   [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
  return rv;
}
 101 
// 64-bit atomic exchange, emulated with a CASX retry loop (there is no
// 64-bit SWAP used here).  Operands: %0 = rv, %1 = exchange_value,
// %2 = dest; %o2/%o3 scratch.
// NOTE(review): the `order` argument is not consulted here.
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    "1:\n\t"
    " mov    %1, %%o3\n\t"          // o3 = exchange_value (casx clobbers o3, so reload each try)
    " ldx    [%2], %%o2\n\t"        // o2 = current value at *dest
    " casx   [%2], %%o2, %%o3\n\t"  // if *dest == o2: store o3; o3 = old *dest
    " cmp    %%o2, %%o3\n\t"        // casx succeeded iff o3 still equals o2
    " bne    %%xcc, 1b\n\t"         // lost the race: retry
    "  nop\n\t"                     // branch delay slot
    " mov    %%o2, %0\n\t"          // return the value that was replaced
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
 123 
// No direct support for cmpxchg of bytes; emulate using int.
// CmpxchgByteUsingInt (shared helper) performs a 4-byte cmpxchg on the
// containing word and splices the byte in.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 127 
// 32-bit compare-and-swap: a single CAS instruction.
// The "0" constraint places exchange_value in the result register %0;
// CAS then compares *dest (%2) with compare_value (%3): on match it
// swaps *dest with %0, otherwise it loads *dest into %0.  Either way
// %0 ends up holding the previous memory value, which is returned.
// NOTE(review): the `order` argument is not consulted here.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv;
  __asm__ volatile(
    " cas    [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
 143 
// 64-bit compare-and-swap: a single CASX instruction, mirroring the
// 4-byte version above.  %0 starts as exchange_value (via the "0" tie)
// and finishes holding the previous value of *dest, which is returned.
// NOTE(review): the `order` argument is not consulted here.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv;
  __asm__ volatile(
    " casx   [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
 159 
 160 #endif // OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP