//
// Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// Get the raw thread ID from %g7
//
// Results:
//   O0: the value currently held in %g7
//      (presumably the thread pointer on this platform -- the convention
//       itself is established by the VM, not visible in this file)

  .inline _raw_thread_id, 0
  // Declare %g7 as scratch so the assembler accepts the explicit use.
  .register %g7,#scratch
  .volatile
  mov %g7, %o0
  .nonvolatile
  .end


// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments:
//   exchange_value: O0
//   dest:           O1
//
// Results:
//   O0: the value previously stored in dest

  .inline _Atomic_swap32, 2
  .volatile
  // Single-instruction atomic exchange of O0 with the 32-bit word at [O1].
  swap [%o1],%o0
  .nonvolatile
  .end


// Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
//
// 64-bit
//
// Arguments:
//   exchange_value: O0
//   dest:           O1
//
// Results:
//   O0: the value previously stored in dest

  .inline _Atomic_swap64, 2
  .volatile
  // Compare-and-swap retry loop: there is no 64-bit swap instruction,
  // so emulate the exchange with casx until it succeeds unraced.
1:
  mov %o0, %o3              // o3 = value we want to store
  ldx [%o1], %o2            // o2 = snapshot of current value at dest
  casx [%o1], %o2, %o3      // if [o1] == o2, store o3; o3 <- old [o1]
  cmp %o2, %o3
  bne %xcc, 1b              // snapshot was stale (another writer raced us): retry
  nop                       // branch delay slot
  mov %o2, %o0              // return the previously stored value
  .nonvolatile
  .end


// Support for jlong Atomic::load and Atomic::store on v9.
//
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
//
// Arguments:
//   src:  O0
//   dest: O1
//
// Overwrites O2

  .inline _Atomic_move_long_v9,2
  .volatile
  // One 64-bit load + one 64-bit store: each is atomic on v9,
  // giving a tear-free jlong copy.
  ldx [%o0], %o2
  stx %o2, [%o1]
  .nonvolatile
  .end

// Support for void Prefetch::read(void *loc, intx interval)
//
// Prefetch for several reads.

  .inline _Prefetch_read, 2
  .volatile
  // prefetch fcn 0 = "prefetch for several reads"
  prefetch [%o0+%o1], 0
  .nonvolatile
  .end


// Support for void Prefetch::write(void *loc, intx interval)
//
// Prefetch for several writes.

  .inline _Prefetch_write, 2
  .volatile
  // prefetch fcn 2 = "prefetch for several writes"
  prefetch [%o0+%o1], 2
  .nonvolatile
  .end


// Support for void Copy::conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count)
//
// 32-bit
//
// Arguments:
//   from:  O0
//   to:    O1
//   count: O2 treated as signed
//
// Clobbers:
//   long_value: O2, O3
//   count:      O4
//
// Equivalent C, with direction chosen so overlapping ranges copy safely:
// if (from > to) {
//   while (--count >= 0) {
//     *to++ = *from++;
//   }
// } else {
//   while (--count >= 0) {
//     to[count] = from[count];
//   }
// }
  .inline _Copy_conjoint_jlongs_atomic, 3
  .volatile
  cmp %o0, %o1
  bleu 4f                   // from <= to: copy from the high end downward
  sll %o2, 3, %o4           // delay slot: o4 = byte count (count * 8)
  // Forward copy (from > to): ldd/std move one jlong per iteration.
  ba 2f
1:
  subcc %o4, 8, %o4
  std %o2, [%o1]
  add %o0, 8, %o0
  add %o1, 8, %o1
2:
  bge,a 1b                  // annulled: the ldd runs only when looping again
  ldd [%o0], %o2
  ba 5f
  nop
  // Backward copy (from <= to): index both arrays by the shrinking offset.
3:
  std %o2, [%o1+%o4]
4:
  subcc %o4, 8, %o4
  bge,a 3b                  // annulled: load skipped once the count is exhausted
  ldd [%o0+%o4], %o2
5:
  .nonvolatile
  .end