/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}


inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}
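
// For example (illustrative, assuming SPARC assembler syntax):
//   ld_ptr(O0, 8, G1)  emits  "ldx [%o0 + 8], %g1"  on LP64,
// and the 32-bit form  "ld  [%o0 + 8], %g1"  otherwise.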

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}
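
// For example (illustrative): scaling an index register to a pointer-sized
// byte offset can be written as
//   sll_ptr(index, LogBytesPerWord, index);
// which shifts by 3 (sllx) on LP64 and by 2 (sll) on 32-bit VMs.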

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
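
// For reference (illustrative): the V9 call instruction encodes a 30-bit
// word displacement, i.e. roughly +/-2GB of reach from the call site.
// is_far_target() is conservative: it requires the target to be reachable
// from both ends of the code cache, since the final call site can be
// anywhere within it.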

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
  MacroAssembler::call(d, Relocation::spec_simple(rt));
}

inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rspec);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rspec);
  }
#else
  Assembler::call( d, rspec );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  insert_nop_after_cbcond();
  MacroAssembler::call( target(L), rt);
}


inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }


// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  rdpc(d);
  return offset() - x;
}


// Note:  All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}


inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}
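
// For example (illustrative): load_contents() expands to the classic
// two-instruction SPARC absolute-address sequence
//   sethi %hi(addr), d
//   ld    [d + %lo(addr)], d
// where low10() supplies the %lo part of the address literal.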

// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);                   // sethi is caller responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}
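
// Note (illustrative): set_oop() materializes the oop's address together with
// an oop_type relocation, so the GC can find and update the embedded pointer
// if the object moves; set_metadata() does the same for Metadata* values via
// metadata_type relocations.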

inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());    // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}


#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes floats in F1, F3, F5 instead of O0, O1, O2
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry
    // The low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes doubles in D0, D2, D4 instead of O0, O1, O2
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(), d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d, offset, d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(), d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d, offset, d);
}

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
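
// For example (illustrative): on LP64, ld(O0, 0, G1) emits "ldsw", so a
// 32-bit field loaded into the 64-bit %g1 is sign-extended before any
// subsequent pointer or long arithmetic uses it.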

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               {                          ld( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}
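
// Note (illustrative): the Address-based forms dispatch on how the Address
// was built -- Address(base, disp) uses the reg+simm13 encoding and accepts
// an extra offset, while Address(base, index) uses the reg+reg encoding and
// asserts offset == 0, since two-register addressing has no room for a
// displacement.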

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) {
    assert(offset == 0, "");
    ldf(w, a.base(), a.index(), d);
  } else {
    ldf(w, a.base(), a.disp() + offset, d);
  }
}

// Returns whether a membar instruction would be emitted for this mask;
// this code must mirror membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())
    return false;  // Not needed on single CPU
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())
    return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  // of the mmask subfield of const7a that does anything that isn't done
  // implicitly is StoreLoad.
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  if (effective_mask != 0) {
    Assembler::membar(effective_mask);
  }
}
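
// For example (illustrative): because TSO already orders everything except
// stores against later loads, membar(Membar_mask_bits(StoreLoad)) emits a
// real "membar #StoreLoad", while membar(LoadLoad) folds away to nothing.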

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register())  stf(w, d, s1, s2.as_register());
  else                   stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
  else               {                          stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(), d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d, offset, d);
}

inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  else               {                          swap(a.base(), a.disp() + offset, d); }
}

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP