//
// Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// Get the raw thread ID from %g7.
//
// Arguments:
//      none
//
// Results:
//      O0: the current contents of %g7
//
// .register %g7,#scratch tells the assembler %g7 is used here on purpose;
// .volatile/.nonvolatile bracket the template so the compiler does not
// reorder or optimize across it.

        .inline _raw_thread_id, 0
        .register %g7,#scratch
        .volatile
        mov     %g7, %o0
        .nonvolatile
        .end


// Support for int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest).
//
// Arguments:
//      exchange_value: O0
//      dest:           O1
//
// Results:
//      O0: the value previously stored in dest
//
// 'swap' atomically exchanges the word at [O1] with O0 in one instruction,
// so no retry loop is needed for the 32-bit case.

        .inline _Atomic_swap32, 2
        .volatile
        swap    [%o1],%o0
        .nonvolatile
        .end


// Support for int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t * dest).
//
// 64-bit
//
// Arguments:
//      exchange_value: O0
//      dest:           O1
//
// Results:
//      O0: the value previously stored in dest
//
// There is no 64-bit 'swap' instruction, so this emulates an atomic
// exchange with a casx retry loop: reload the current value and attempt
// the compare-and-swap until no other CPU has changed [O1] in between.

        .inline _Atomic_swap64, 2
        .volatile
1:
        mov     %o0, %o3              // o3 = value to store
        ldx     [%o1], %o2            // o2 = currently observed value at dest
        casx    [%o1], %o2, %o3       // if [o1] == o2: [o1] <-> o3
        cmp     %o2, %o3              // o2 != o3 means the casx lost a race
        bne     %xcc, 1b              // retry on failure
        nop                           // branch delay slot
        mov     %o2, %o0              // success: return the previous value
        .nonvolatile
        .end


// Support for int32_t Atomic::specialized_cmpxchg(int32_t exchange_value,
//                                                 volatile int32_t* dest,
//                                                 int32_t compare_value)
//
// Arguments:
//      exchange_value: O0
//      dest:           O1
//      compare_value:  O2
//
// Results:
//      O0: the value previously stored in dest
//
// Single 'cas': stores O0 into [O1] iff [O1] == O2; O0 always receives
// the old value, which the caller compares against compare_value.

        .inline _Atomic_cas32, 3
        .volatile
        cas     [%o1], %o2, %o0
        .nonvolatile
        .end


// Support for int64_t Atomic::specialized_cmpxchg(int64_t exchange_value,
//                                                 volatile int64_t* dest,
//                                                 int64_t compare_value)
//
// 64-bit
//
// Arguments:
//      exchange_value: O0
//      dest:           O1
//      compare_value:  O2
//
// Results:
//      O0: the value previously stored in dest
//
// Same contract as _Atomic_cas32, using the 64-bit 'casx'.

        .inline _Atomic_cas64, 3
        .volatile
        casx    [%o1], %o2, %o0
        .nonvolatile
        .end


// Support for int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest).
//
// Arguments:
//      add_value: O0   (e.g., +1 or -1)
//      dest:      O1
//
// Results:
//      O0: the new value stored in dest
//
// Overwrites O3
//
// cas retry loop: recompute current + add_value and attempt the
// compare-and-swap until no other CPU has updated [O1] in between.
// Note: branches on the 32-bit condition codes (no %xcc) since this
// is the 32-bit variant.

        .inline _Atomic_add32, 2
        .volatile
2:
        ld      [%o1], %o2            // o2 = currently observed value at dest
        add     %o0, %o2, %o3         // o3 = o2 + add_value
        cas     [%o1], %o2, %o3       // store o3 iff [o1] still equals o2
        cmp     %o2, %o3              // o2 != o3 means the cas lost a race
        bne     2b                    // retry on failure
        nop                           // branch delay slot
        add     %o0, %o2, %o0         // success: return old value + add_value
        .nonvolatile
        .end


// Support for int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest)
//
// 64-bit
//
// Arguments:
//      add_value: O0   (e.g., +1 or -1)
//      dest:      O1
//
// Results:
//      O0: the new value stored in dest
//
// Overwrites O3
//
// 64-bit twin of _Atomic_add32: casx retry loop, branching on %xcc.

        .inline _Atomic_add64, 2
        .volatile
3:
        ldx     [%o1], %o2            // o2 = currently observed value at dest
        add     %o0, %o2, %o3         // o3 = o2 + add_value
        casx    [%o1], %o2, %o3       // store o3 iff [o1] still equals o2
        cmp     %o2, %o3              // o2 != o3 means the casx lost a race
        bne     %xcc, 3b              // retry on failure
        nop                           // branch delay slot
        add     %o0, %o2, %o0         // success: return old value + add_value
        .nonvolatile
        .end


// Support for void Prefetch::read(void *loc, intx interval)
//
// Prefetch for several reads.

        .inline _Prefetch_read, 2
        .volatile
        prefetch [%o0+%o1], 0         // variant 0 = prefetch for several reads
        .nonvolatile
        .end


// Support for void Prefetch::write(void *loc, intx interval)
//
// Prefetch for several writes.

        .inline _Prefetch_write, 2
        .volatile
        prefetch [%o0+%o1], 2         // variant 2 = prefetch for several writes
        .nonvolatile
        .end


// Support for void Copy::conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count)
//
// 32-bit
//
// Arguments:
//      from:  O0
//      to:    O1
//      count: O2   treated as signed
//
// Clobbers:
//      long_value: O2, O3
//      count:      O4
//
//      if (from > to) {
//        while (--count >= 0) {
//          *to++ = *from++;
//        }
//      } else {
//        while (--count >= 0) {
//          to[count] = from[count];
//        }
//      }
//
// Each element is moved with a 64-bit ldd/std pair so every jlong is
// copied atomically. Copy direction is chosen so overlapping ranges are
// handled correctly: ascending when from > to, descending otherwise.
// NOTE(review): the loops lean heavily on delay-slot semantics — the
// 'ba 2f' delay slot performs the first subcc, and the ',a' (annul)
// branches execute their ldd delay slot only when the branch is taken.

        .inline _Copy_conjoint_jlongs_atomic, 3
        .volatile
        cmp     %o0, %o1              // compare 'from' with 'to'
        bleu    4f                    // from <= to: copy descending
        sll     %o2, 3, %o4           // delay slot: o4 = count in bytes
        ba      2f                    // ascending copy: jump to loop test;
1:                                    // delay slot (subcc) runs once first
        subcc   %o4, 8, %o4           // consume 8 bytes of remaining count
        std     %o2, [%o1]            // store one jlong
        add     %o0, 8, %o0           // advance 'from'
        add     %o1, 8, %o1           // advance 'to'
2:
        bge,a   1b                    // bytes remaining? (annulled slot)
        ldd     [%o0], %o2            // load next jlong only if taken
        ba      5f                    // ascending copy done
        nop
3:
        std     %o2, [%o1+%o4]        // store one jlong at current offset
4:
        subcc   %o4, 8, %o4           // step offset down by one jlong
        bge,a   3b                    // offset still >= 0? (annulled slot)
        ldd     [%o0+%o4], %o2        // load jlong at offset only if taken
5:
        .nonvolatile
        .end