10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
26 #define SHARE_VM_RUNTIME_ATOMIC_HPP
27
28 #include "memory/allocation.hpp"
29
30 class Atomic : AllStatic {
31 private:
// Out-of-line fallback for byte-sized compare-and-exchange; presumably used
// by platforms lacking a native byte-wide cmpxchg instruction -- TODO confirm
// against the platform-specific atomic_*.inline.hpp implementations.
32 static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
33
34 public:
35 // Atomic operations on jlong types are not available on all 32-bit
36 // platforms. If atomic ops on jlongs are defined here they must only
37 // be used from code that verifies they are available at runtime and
38 // can provide an alternative action if not - see supports_cx8() for
39 // a means to test availability.
40
41 // The memory operations that are mentioned with each of the atomic
42 // function families come from src/share/vm/runtime/orderAccess.hpp,
43 // e.g., <fence> is described in that file and is implemented by the
44 // OrderAccess::fence() function. See that file for the gory details
45 // on the Memory Access Ordering Model.
46
47 // All of the atomic operations that imply a read-modify-write action
48 // guarantee a two-way memory barrier across that operation. Historically
49 // these semantics reflect the strength of atomic operations that are
// NOTE(review): the comment above is truncated -- the intervening
// declarations (add/inc families etc.) are elided in this excerpt.
90 // Atomically decrement a location. dec*() provide:
91 // <fence> decrement-dest <membar StoreLoad|StoreStore>
92 inline static void dec (volatile jint* dest);
93 static void dec (volatile jshort* dest); // requires ATOMIC_SHORT_PAIR alignment (see note below)
94 inline static void dec (volatile size_t* dest);
95 inline static void dec_ptr(volatile intptr_t* dest);
96 inline static void dec_ptr(volatile void* dest);
97
98 // Performs atomic exchange of *dest with exchange_value. Returns the
99 // prior value of *dest. xchg*() provide:
100 // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
101 inline static jint xchg (jint exchange_value, volatile jint* dest);
102 static unsigned int xchg (unsigned int exchange_value, volatile unsigned int* dest);
103 inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
104 inline static void* xchg_ptr(void* exchange_value, volatile void* dest);
105
106 // Performs atomic compare of *dest and compare_value, and exchanges
107 // *dest with exchange_value if the comparison succeeded. Returns prior
108 // value of *dest. cmpxchg*() provide:
109 // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
110 inline static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
111 inline static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
112 // See comment above about using jlong atomics on 32-bit platforms
113 inline static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
114 static unsigned int cmpxchg (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value);
115 inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
116 inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
117 };
118
119 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
120 // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
121 // achieve this is to place your short value next to another short value, which doesn't need atomic ops.
122 //
123 // Example
124 // ATOMIC_SHORT_PAIR(
125 // volatile short _refcount, // needs atomic operation
126 // unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op)
127 // );
128
129 #ifdef VM_LITTLE_ENDIAN
130 #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
131 non_atomic_decl; \
132 atomic_decl
133 #else
134 #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
135 atomic_decl; \
136 non_atomic_decl
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
26 #define SHARE_VM_RUNTIME_ATOMIC_HPP
27
28 #include "memory/allocation.hpp"
29
// Memory ordering constraint accepted by the Atomic::cmpxchg*() family.
//
// memory_order_conservative preserves the historical two-way memory
// barrier semantics of these operations; memory_order_relaxed imposes no
// ordering beyond atomicity.
//
// Fix: the enum tag previously read "cmpxchg_cmpxchg_memory_order"
// (duplicated prefix), inconsistent with the typedef name it introduces;
// the tag now matches the typedef.
typedef enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
  memory_order_conservative = 8
} cmpxchg_memory_order;
35
36 class Atomic : AllStatic {
37 private:
// Out-of-line fallback for byte-sized compare-and-exchange; presumably used
// by platforms lacking a native byte-wide cmpxchg instruction -- TODO confirm
// against the platform-specific atomic_*.inline.hpp implementations.
38 static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
39
40 public:
41 // Atomic operations on jlong types are not available on all 32-bit
42 // platforms. If atomic ops on jlongs are defined here they must only
43 // be used from code that verifies they are available at runtime and
44 // can provide an alternative action if not - see supports_cx8() for
45 // a means to test availability.
46
47 // The memory operations that are mentioned with each of the atomic
48 // function families come from src/share/vm/runtime/orderAccess.hpp,
49 // e.g., <fence> is described in that file and is implemented by the
50 // OrderAccess::fence() function. See that file for the gory details
51 // on the Memory Access Ordering Model.
52
53 // All of the atomic operations that imply a read-modify-write action
54 // guarantee a two-way memory barrier across that operation. Historically
55 // these semantics reflect the strength of atomic operations that are
// NOTE(review): the comment above is truncated -- the intervening
// declarations (add/inc families etc.) are elided in this excerpt.
96 // Atomically decrement a location. dec*() provide:
97 // <fence> decrement-dest <membar StoreLoad|StoreStore>
98 inline static void dec (volatile jint* dest);
99 static void dec (volatile jshort* dest); // requires ATOMIC_SHORT_PAIR alignment (see note below)
100 inline static void dec (volatile size_t* dest);
101 inline static void dec_ptr(volatile intptr_t* dest);
102 inline static void dec_ptr(volatile void* dest);
103
104 // Performs atomic exchange of *dest with exchange_value. Returns the
105 // prior value of *dest. xchg*() provide:
106 // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
107 inline static jint xchg (jint exchange_value, volatile jint* dest);
108 static unsigned int xchg (unsigned int exchange_value, volatile unsigned int* dest);
109 inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
110 inline static void* xchg_ptr(void* exchange_value, volatile void* dest);
111
112 // Performs atomic compare of *dest and compare_value, and exchanges
113 // *dest with exchange_value if the comparison succeeded. Returns prior
114 // value of *dest. cmpxchg*() provide:
115 // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
// The 'order' argument defaults to memory_order_conservative, which keeps
// the two-way barrier semantics described above; memory_order_relaxed
// allows a weaker (atomicity-only) implementation.
116 inline static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order = memory_order_conservative);
117 inline static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order = memory_order_conservative);
118 // See comment above about using jlong atomics on 32-bit platforms
119 inline static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order = memory_order_conservative);
120 static unsigned int cmpxchg (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
121 inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative);
122 inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order = memory_order_conservative);
123 };
124
125 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
126 // aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to
127 // achieve this is to place your short value next to another short value, which doesn't need atomic ops.
128 //
129 // Example
130 // ATOMIC_SHORT_PAIR(
131 // volatile short _refcount, // needs atomic operation
132 // unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op)
133 // );
134
135 #ifdef VM_LITTLE_ENDIAN
136 #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
137 non_atomic_decl; \
138 atomic_decl
139 #else
140 #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
141 atomic_decl; \
142 non_atomic_decl
|