//
// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// Solaris Studio inline-assembler templates (x86, 32-bit, AT&T syntax).
// Each .inline/.end pair is substituted for a call to the named function;
// arguments are read from the stack at 0(%esp), 4(%esp), ...

// Support for u8 os::setup_fpu()
// Loads the x87 FPU control word from the address passed as the argument.
.inline _solaris_raw_setup_fpu,1
  movl  0(%esp), %eax          // address of the desired control-word image
  fldcw (%eax)                 // install it in the x87 control register
.end

// The argument size of each inline directive is ignored by the compiler
// and is set to 0 for compatibility reason.

// Get the raw thread ID from %gs:0
.inline _raw_thread_id,0
  movl %gs:0, %eax
.end

// Get current sp
.inline _get_current_sp,0
  .volatile                    // must not be optimized/moved by the compiler
  movl %esp, %eax
.end

// Get current fp
.inline _get_current_fp,0
  .volatile                    // must not be optimized/moved by the compiler
  movl %ebp, %eax
.end

// Support for os::rdtsc()
// Result is returned in %edx:%eax (high:low), the jlong return convention.
.inline _raw_rdtsc,0
  rdtsc
.end

// Support for jint Atomic::add(jint inc, volatile jint* dest)
// An additional bool (os::is_MP()) is passed as the last argument.
// Implements jint Atomic::add(jint inc, volatile jint* dest) with a trailing
// bool os::is_MP() argument; the lock prefix is skipped on uniprocessors.
.inline _Atomic_add,3
  movl  0(%esp), %eax          // inc
  movl  4(%esp), %edx          // dest
  movl  %eax, %ecx             // save inc so the new value can be computed
  cmpl  $0, 8(%esp)            // MP test
  jne   1f
  xaddl %eax, (%edx)           // UP: exchange-and-add, no lock needed
  jmp   2f
1: lock
  xaddl %eax, (%edx)           // MP: locked exchange-and-add
2: addl  %ecx, %eax            // return old value + inc (the new value)
.end

// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
// xchg with a memory operand is implicitly locked, so no MP test is needed.
.inline _Atomic_xchg,2
  movl  0(%esp), %eax          // exchange_value
  movl  4(%esp), %ecx          // dest
  xchgl (%ecx), %eax           // old value is returned in %eax
.end

// Support for jint Atomic::cmpxchg(jint exchange_value,
//                                  volatile jint* dest,
//                                  jint compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
// Returns the value found at *dest (== compare_value iff the swap happened).
.inline _Atomic_cmpxchg,4
  movl  8(%esp), %eax          // compare_value
  movl  0(%esp), %ecx          // exchange_value
  movl  4(%esp), %edx          // dest
  cmp   $0, 12(%esp)           // MP test
  jne   1f
  cmpxchgl %ecx, (%edx)        // UP: no lock needed
  jmp   2f
1: lock
  cmpxchgl %ecx, (%edx)        // MP: locked compare-and-swap
2:
.end

// Support for jlong Atomic::cmpxchg(jlong exchange_value,
//                                   volatile jlong* dest,
//                                   jlong compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
// cmpxchg8b uses %edx:%eax as the comparand and %ecx:%ebx as the new value.
.inline _Atomic_cmpxchg_long,6
  pushl %ebx                   // %ebx/%edi must be preserved for the compiler;
  pushl %edi                   // argument offsets below include these 8 bytes
  movl  20(%esp), %eax         // compare_value (low)
  movl  24(%esp), %edx         // compare_value (high)
  movl  16(%esp), %edi         // dest
  movl  8(%esp), %ebx          // exchange_value (low)
  movl  12(%esp), %ecx         // exchange_value (high)
  cmp   $0, 28(%esp)           // MP test
  jne   1f
  cmpxchg8b (%edi)             // UP: no lock needed
  jmp   2f
1: lock
  cmpxchg8b (%edi)             // MP: locked 8-byte compare-and-swap
2: popl  %edi
  popl  %ebx
.end

// Support for jlong Atomic::load and Atomic::store.
// void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
// Moves a 64-bit value through the x87 stack: a single fildll/fistpll pair
// gives an indivisible 8-byte load and store on 32-bit x86.
.inline _Atomic_move_long,2
  movl    0(%esp), %eax        // src
  fildll  (%eax)               // 64-bit load onto the x87 stack
  movl    4(%esp), %eax        // dest
  fistpll (%eax)               // 64-bit store; pops the x87 stack
.end

// Support for OrderAccess::acquire()
// A dummy load; the memory reference keeps the compiler from reordering
// across this point.
.inline _OrderAccess_acquire,0
  movl 0(%esp), %eax
.end

// Support for OrderAccess::fence()
// A locked read-modify-write of the stack top acts as a full memory fence.
.inline _OrderAccess_fence,0
  lock
  addl $0, (%esp)
.end

// Support for u2 Bytes::swap_u2(u2 x)
.inline _raw_swap_u2,1
  movl  0(%esp), %eax
  xchgb %al, %ah               // swap the two low-order bytes
.end

// Support for u4 Bytes::swap_u4(u4 x)
.inline _raw_swap_u4,1
  movl  0(%esp), %eax
  bswap %eax
.end

// Support for u8 Bytes::swap_u8_base(u4 x, u4 y)
// x/y are the low/high halves of the u8; the byte swap also exchanges the
// halves, so the result (returned in %edx:%eax) is bswap(x):bswap(y).
.inline _raw_swap_u8,2
  movl  4(%esp), %eax          // y -> low word of the result
  movl  0(%esp), %edx          // x -> high word of the result
  bswap %eax
  bswap %edx
.end