/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"

inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
  jint stub_inst = *(jint*) branch;
  print_instruction(stub_inst);
  ::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}


// inlines for SPARC assembler -- dmu 5/97

inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
  guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
  delay_state = no_delay;
# endif
}

inline void Assembler::emit_long(int x) {
  check_delay();
  AbstractAssembler::emit_long(x);
}

inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
  relocate(rtype);
  emit_long(x);
}

inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
  relocate(rspec);
  emit_long(x);
}


inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
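
// Every control-transfer emitter in this file ends with has_delay_slot(): on
// SPARC the instruction after a branch or call executes before the transfer
// takes effect, so callers must fill that slot deliberately.  A minimal usage
// sketch (illustrative only, assuming HotSpot's usual `__` shorthand for the
// current MacroAssembler):
//
//   Label L;
//   __ br(Assembler::always, false, Assembler::pt, L);
//   __ delayed()->nop();   // explicitly fill the delay slot
//
// In CHECK_DELAY builds, omitting delayed() trips the guarantee in
// check_delay() above.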

inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }

inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }

// compare and branch
inline void Assembler::cbc(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbc_before(); emit_data(op(branch_op) | cond_cbc(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
inline void Assembler::cbc(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbc_before(); emit_data(op(branch_op) | cond_cbc(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }

inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }

inline void Assembler::flush( Register s1, Register s2) { emit_long( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
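
// The control-transfer emitters above differ mainly in the width of their
// word displacement field, which bounds how far each can reach from the pc
// (one unit = one 4-byte instruction word; a reference note, not in the
// original file):
//
//   cbc         wdisp10   +/- 2 KB
//   bpr         wdisp16   +/- 128 KB
//   bp, fbp     wdisp19   +/- 1 MB
//   br, fb, cb  wdisp22   +/- 8 MB
//   call        wdisp30   +/- 2 GB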

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
  else                  ldf(w, s1, s2.as_constant(), d);
}

inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
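
// The integer loads below assemble to SPARC format 3 instruction words; the
// field helpers or'd together correspond to this bit layout (a reference
// note, not in the original file):
//
//   bits 31..30  op      = ldst_op (3)
//   bits 29..25  rd      destination register
//   bits 24..19  op3     selects the operation (ldsb_op3, lduw_op3, ...)
//   bits 18..14  rs1     base address register
//   bit  13      i       0: second operand is rs2 (bits 4..0)
//                        1: second operand is a signed 13-bit immediate
//   bits 12..0   simm13  displacement used when i == 1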

inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduh( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduh( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduh_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lduw( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lduw( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(lduw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldx( Register s1, Register s2, Register d) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(ldx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
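
// A worked consequence of the mapping above (not in the original file):
// loading a 32-bit jint of -1 through ld on LP64 uses ldsw, so the 64-bit
// destination register holds 0xFFFFFFFFFFFFFFFF rather than the
// zero-extended 0x00000000FFFFFFFF that lduw would produce.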

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void Assembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               { ldx( a.base(), a.disp() + offset, d); }
}

inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
  if (a.has_index()) add(a.base(), a.index(), d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)   add(d, offset, d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register()) add(s1, s2.as_register(), d);
  else                  { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)      add(d, offset, d);
}

inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register()) andn(s1, s2.as_register(), d);
  else                  andn(s1, s2.as_constant(), d);
}
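
// A minimal sketch of the effective-address idiom above (illustrative only;
// the registers are arbitrary and `__` is HotSpot's usual shorthand for the
// current MacroAssembler):
//
//   Address field(O0, 16);   // the location [O0 + 16]
//   __ add(field, O1, 0);    // O1 = O0 + 16, the effective address itself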

inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }


inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }


inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }

inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }

// pp 222

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register()) stf(w, d, s1, s2.as_register());
  else                  stf(w, d, s1, s2.as_constant());
}

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()); }
  else               { stf(w, d, a.base(), a.disp() + offset); }
}

inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// p 226
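
// Note the operand order on the integer stores below: the value register d
// comes first and the address (s1 plus a register or simm13 offset) second,
// mirroring assembly syntax like "st %d, [%s1 + %s2]"; the loads above name
// the address first and the destination register last.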

inline void Assembler::stb( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stb( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sth( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sth( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(sth_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stw( Register d, Register s1, Register s2) { emit_long( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stw( Register d, Register s1, int simm13a) { emit_data( op(ldst_op) | rd(d) | op3(stw_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }


inline void Assembler::stx( Register d, Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(d) | op3(stx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void Assembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()); }
  else               { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()); }
  else               { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()); }
  else               { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()); }
  else               { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()); }
  else               { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()); }
  else               { stx(d, a.base(), a.disp() + offset); }
}

inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

// v8 p 99

inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register()) sub(s1, s2.as_register(), d);
  else                  { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)      sub(d, offset, d);
}

// pp 231

inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(a.rspec(offset)); swap( a.base(), a.disp() + offset, d ); }


// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ld( s1, simm13a, d);
#endif
}
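
// A usage sketch (illustrative; the field offset is hypothetical and `__` is
// the usual MacroAssembler shorthand):
//
//   __ ld_ptr(O0, 8, G3);   // assembles as ldx on _LP64 and as ld otherwise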
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  Assembler::ldx(a, d, offset);
#else
  Assembler::ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  Assembler::stx(d, a, offset);
#else
  Assembler::std(d, a, offset);
#endif
}

// Functions for isolating 64 bit shifts for LP64
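
// On LP64 the 64-bit forms are required: sllx/srlx encode a 6-bit shift
// count (0..63), while sll/srl encode only 5 bits (0..31); hence the imm6a
// parameter name below.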

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
  else                  sll_ptr(s1, s2.as_constant(), d);
}

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L, bool emit_delayed_nop ) {
  if (emit_delayed_nop && use_cbc(L)) {
    Assembler::cbc(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  // Some callers can fill the delay slot.
  if (emit_delayed_nop) {
    delayed()->nop();
  }
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
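
// Rationale (an added note): the call instruction's 30-bit word displacement
// reaches +/- 2 GB from the call site.  Testing d against both code cache
// bounds is conservative: a target reachable from both ends is reachable
// from any call site in between.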

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call( d, rt );
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}


inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }


// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7) delayed()->nop();
    else         delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}


// Note: All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  Assembler::add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}


inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  sethi(addrlit, d);
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  sethi(addrlit, temp);
  st_ptr(s, temp, addrlit.low10() + offset);
}
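
// The four *_contents helpers above all emit the classic SPARC
// two-instruction absolute-addressing pattern, e.g. for a load:
//
//   sethi %hi(addr), d          // hi22: upper 22 bits of the address
//   ld    [d + %lo(addr)], d    // low10: remaining 10 bits as the simm13
//
// AddressLiteral::low10(), defined near the top of this file, supplies the
// second half.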

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);                   // sethi is caller responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}


inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}

#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes float arguments in F1, F3, F5 instead of O0, O1, O2.
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes double arguments in D0, D2, D4 instead of O0, O1, O2.
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

// Returns true if membar emits any instruction; this predicate should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if( !os::is_MP() ) return false;  // Not needed on single CPU
  if( VM_Version::v9_instructions_work() ) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if( VM_Version::v9_instructions_work() ) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if ( effective_mask != 0 ) {
      Assembler::membar( effective_mask );
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}

#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP