1 /*
2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
26 #define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
27
28 // Implementation of class atomic
29
// Sized dispatcher for atomic add.  The AddAndFetch CRTP base adapts the
// generic Atomic::add entry points onto the per-size add_and_fetch
// member below; specializations for 4 and 8 bytes are defined in this file.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Atomically performs *dest += add_value and returns the NEW value.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
37
// 32-bit atomic fetch-and-add built from a ld/cas retry loop.
// Returns the new value (old + add_value), per the add_and_fetch contract.
// The "memory" clobber prevents the compiler from caching or reordering
// memory accesses across the asm; no explicit membar is issued, so
// hardware ordering presumably relies on SPARC TSO -- confirm against
// callers' ordering requirements.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ld [%2], %%o2\n\t"         // %o2 = current value of *dest
    " add %1, %%o2, %%o3\n\t"    // %o3 = %o2 + add_value
    " cas [%2], %%o2, %%o3\n\t"  // if *dest still == %o2, store %o3;
                                 // %o3 receives *dest's prior value either way
    " cmp %%o2, %%o3\n\t"        // did memory still hold what we loaded?
    " bne 1b\n\t"                // no -> lost a race, retry (icc, 32-bit compare)
    " nop\n\t"                   // branch delay slot
    " add %1, %%o2, %0\n\t"      // result = successful old value + add_value
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
59
// 64-bit atomic fetch-and-add: same ldx/casx retry loop as the 4-byte
// version, but with 64-bit loads, casx, and the branch testing the
// 64-bit condition codes (%xcc).  Returns the new value.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ldx [%2], %%o2\n\t"         // %o2 = current 64-bit value of *dest
    " add %1, %%o2, %%o3\n\t"     // %o3 = %o2 + add_value
    " casx [%2], %%o2, %%o3\n\t"  // publish if unchanged; %o3 = prior *dest
    " cmp %%o2, %%o3\n\t"
    " bne %%xcc, 1b\n\t"          // raced -> retry (64-bit condition codes)
    " nop\n\t"                    // branch delay slot
    " add %1, %%o2, %0\n\t"       // result = old value + add_value
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
81
// 32-bit atomic exchange via the SPARC swap instruction, which atomically
// exchanges a register with a word in memory.  Returns the previous
// contents of *dest.  The "0" matching constraint places exchange_value
// in the same register as the result, so swap both supplies the new value
// and receives the old one.
// NOTE(review): SPARC V9 deprecates swap in favor of cas -- the 8-byte
// variant below already uses casx; confirm whether this should too.
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    " swap [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
  return rv;
}
95
// 64-bit atomic exchange emulated with a casx retry loop (no 64-bit swap
// instruction is used).  Returns the previous contents of *dest.
// exchange_value is re-copied into %o3 on every iteration because casx
// overwrites %o3 with the memory's old value.
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    "1:\n\t"
    " mov %1, %%o3\n\t"           // %o3 = exchange_value (clobbered by casx)
    " ldx [%2], %%o2\n\t"         // %o2 = current value of *dest
    " casx [%2], %%o2, %%o3\n\t"  // store %o3 if *dest unchanged; %o3 = old
    " cmp %%o2, %%o3\n\t"         // raced with another writer?
    " bne %%xcc, 1b\n\t"          // yes -> retry
    " nop\n\t"                    // branch delay slot
    " mov %%o2, %0\n\t"           // return the value we displaced
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
116
// No direct support for cmpxchg of bytes; emulate using int.
// CmpxchgByteUsingInt (shared code, defined outside this file) builds the
// 1-byte case on top of the word-sized cmpxchg specializations below.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
120
// 32-bit compare-and-exchange: a single cas instruction.  Returns the
// value *dest held before the operation (equals compare_value iff the
// exchange succeeded).  The "0" matching constraint puts exchange_value
// in the output register; cas leaves the old memory value there whether
// or not the store happened.  The order argument is not consulted --
// presumably SPARC TSO plus the "memory" clobber already provides the
// strongest ordering callers can request; confirm.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv;
  __asm__ volatile(
    " cas [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
136
// 64-bit compare-and-exchange: identical to the 4-byte version but using
// casx.  Returns the prior value of *dest; the order argument is ignored
// (see the 4-byte specialization above for rationale).
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv;
  __asm__ volatile(
    " casx [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
152
153 #endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
|
1 /*
2 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
26 #define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
27
28 // Implementation of class atomic
29
// Sized dispatcher for atomic add.  The AddAndFetch CRTP base adapts the
// generic Atomic::add entry points onto the per-size add_and_fetch
// member below; specializations for 4 and 8 bytes are defined in this file.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Atomically performs *dest += add_value and returns the NEW value.
  // order is accepted for interface uniformity (see specializations).
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};
37
// 32-bit atomic fetch-and-add built from a ld/cas retry loop.
// Returns the new value (old + add_value), per the add_and_fetch contract.
// The order argument is not consulted; the "memory" clobber prevents the
// compiler from caching or reordering memory accesses across the asm, and
// hardware ordering presumably relies on SPARC TSO -- confirm against
// callers' ordering requirements.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ld [%2], %%o2\n\t"         // %o2 = current value of *dest
    " add %1, %%o2, %%o3\n\t"    // %o3 = %o2 + add_value
    " cas [%2], %%o2, %%o3\n\t"  // if *dest still == %o2, store %o3;
                                 // %o3 receives *dest's prior value either way
    " cmp %%o2, %%o3\n\t"        // did memory still hold what we loaded?
    " bne 1b\n\t"                // no -> lost a race, retry (icc, 32-bit compare)
    " nop\n\t"                   // branch delay slot
    " add %1, %%o2, %0\n\t"      // result = successful old value + add_value
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
60
// 64-bit atomic fetch-and-add: same ldx/casx retry loop as the 4-byte
// version, but with 64-bit loads, casx, and the branch testing the
// 64-bit condition codes (%xcc).  Returns the new value; order is not
// consulted (see 4-byte specialization above).
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D rv;
  __asm__ volatile(
    "1: \n\t"
    " ldx [%2], %%o2\n\t"         // %o2 = current 64-bit value of *dest
    " add %1, %%o2, %%o3\n\t"     // %o3 = %o2 + add_value
    " casx [%2], %%o2, %%o3\n\t"  // publish if unchanged; %o3 = prior *dest
    " cmp %%o2, %%o3\n\t"
    " bne %%xcc, 1b\n\t"          // raced -> retry (64-bit condition codes)
    " nop\n\t"                    // branch delay slot
    " add %1, %%o2, %0\n\t"       // result = old value + add_value
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
83
// 32-bit atomic exchange via the SPARC swap instruction, which atomically
// exchanges a register with a word in memory.  Returns the previous
// contents of *dest.  The "0" matching constraint places exchange_value
// in the same register as the result, so swap both supplies the new value
// and receives the old one.  order is not consulted.
// NOTE(review): SPARC V9 deprecates swap in favor of cas -- the 8-byte
// variant below already uses casx; confirm whether this should too.
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    " swap [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
  return rv;
}
98
// 64-bit atomic exchange emulated with a casx retry loop (no 64-bit swap
// instruction is used).  Returns the previous contents of *dest; order is
// not consulted.  exchange_value is re-copied into %o3 on every iteration
// because casx overwrites %o3 with the memory's old value.
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv = exchange_value;
  __asm__ volatile(
    "1:\n\t"
    " mov %1, %%o3\n\t"           // %o3 = exchange_value (clobbered by casx)
    " ldx [%2], %%o2\n\t"         // %o2 = current value of *dest
    " casx [%2], %%o2, %%o3\n\t"  // store %o3 if *dest unchanged; %o3 = old
    " cmp %%o2, %%o3\n\t"         // raced with another writer?
    " bne %%xcc, 1b\n\t"          // yes -> retry
    " nop\n\t"                    // branch delay slot
    " mov %%o2, %0\n\t"           // return the value we displaced
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}
120
// No direct support for cmpxchg of bytes; emulate using int.
// CmpxchgByteUsingInt (shared code, defined outside this file) builds the
// 1-byte case on top of the word-sized cmpxchg specializations below.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
124
// 32-bit compare-and-exchange: a single cas instruction.  Returns the
// value *dest held before the operation (equals compare_value iff the
// exchange succeeded).  The "0" matching constraint puts exchange_value
// in the output register; cas leaves the old memory value there whether
// or not the store happened.  The order argument is not consulted --
// presumably SPARC TSO plus the "memory" clobber already provides the
// strongest ordering callers can request; confirm.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T rv;
  __asm__ volatile(
    " cas [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
140
// 64-bit compare-and-exchange: identical to the 4-byte version but using
// casx.  Returns the prior value of *dest; the order argument is ignored
// (see the 4-byte specialization above for rationale).
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T rv;
  __asm__ volatile(
    " casx [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}
156
157 #endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
|