/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
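// For example, given the same inst_pos, branch_destination(patched_branch(dest_pos,
// inst, inst_pos), inst_pos) == dest_pos: both routines select the same displacement
// field width (30, 22, 19, 16 or 10 bits) from the opcode, so they are exact
// inverses of each other.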
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  r = inv_wdisp(inst, pos, 19);  break;
      case bp_op2:   r = inv_wdisp(inst, pos, 19);  break;
      case fb_op2:   r = inv_wdisp(inst, pos, 22);  break;
      case br_op2:   r = inv_wdisp(inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret(bool trace) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(I7, 2 * BytesPerInstWord, G0);
  }
}

void MacroAssembler::retl(bool trace) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl(O7, 2 * BytesPerInstWord, G0);
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4 * sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4 * sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0,
         JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4 * sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
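// The address stored to is:
//   serialize_page_base + ((thread >> serialize_page_shift) & (page_size - sizeof(int)))
// so distinct threads tend to hit distinct words within the page.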
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);            // to avoid clobbering O0
  mov(G1, L0);              // avoid clobbering G1
  mov(G5_method, L1);       // avoid clobbering G5
  mov(G3, L2);              // avoid clobbering G3 also
  mov(G4, L5);              // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);  // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);              // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);              // avoid clobbering G3
    mov(G4, L4);              // avoid clobbering G4
    mov(G5_method, L5);       // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1, 32, L0);
    srlx(G4, 32, L6);
#endif
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);              // Restore G1
    // G2 restored below
    mov(L3, G3);              // restore G3
    mov(L4, G4);              // restore G4
    mov(L5, G5_method);       // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0, 32, G2);         // Move old high G1 bits high in G2
    srl(G1, 0, G1);           // Clear current high G1 bits
    or3(G1, G2, G1);          // Recover 64-bit G1
    sllx(L6, 32, G2);         // Move old high G4 bits high in G2
    srl(G4, 0, G4);           // Clear current high G4 bits
    or3(G4, G2, G4);          // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);  // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0);  // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
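// (After a register-window save, the caller's SP is visible as the stub's FP,
// so passing FP here records the caller's frame as the last Java frame.)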


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

#ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
#endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}


void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
#ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
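  // The worst-case 64-bit pattern above is 7 instructions
  // (sethi, or3, sllx, or3, sllx, or3, sllx), which is what
  // insts_for_sethi() reports for the general case; patching code
  // relies on this fixed-length form.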
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
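  // Cases, cheapest first: a 13-bit immediate needs one instruction;
  // a zero or all-ones high word needs a sethi plus at most one fix-up;
  // a zero low word builds the high word and shifts it up; the general
  // case builds both halves (using tmp for the high half) and merges them.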
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue the appropriate save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", 2 * last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;  // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.
// May not use a register 'save', as this blows the high bits of the O-regs
// if they contain Long values.  Acts as a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null.
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();  // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);  // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags);  // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();  // make stop actually stop rather than writing
                      // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();  // one more save to free up another O7 register
  mov(I0, O1);   // addr of reg save area

  // We expect pointer to message in I1.
  // Caller must set it up in O1.
  mov(I1, O0);   // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras);  // if neg, clear reg
  delayed()->set(0, Rresult);         // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
  /*case zero: */
  case Assembler::equal:        return Assembler::rc_z;
  case Assembler::lessEqual:    return Assembler::rc_lez;
  case Assembler::less:         return Assembler::rc_lz;
  /*case notZero:*/
  case Assembler::notEqual:     return Assembler::rc_nz;
  case Assembler::greater:      return Assembler::rc_gz;
  case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32-bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
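// (On 64-bit, bpr tests the register contents directly, so the null test and
// the branch fuse into one instruction and the condition codes are left
// untouched; the 32-bit path must clobber them with tst.)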
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_z, a, p, s1, L);
#else
  tst(s1);
  br(zero, a, p, L);
#endif
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_nz, a, p, s1, L);
#else
  tst(s1);
  br(notZero, a, p, L);
#endif
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);              // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).
1769 1770 // Other notes: The first move in each triplet can be unconditional 1771 // (and therefore probably prefetchable). 1772 // And the equals case for the high part does not need testing, 1773 // since that triplet is reached only after finding the high halves differ. 1774 1775 mov(-1, Rresult); 1776 ba(done); 1777 delayed()->movcc(greater, false, icc, 1, Rresult); 1778 1779 bind(check_low_parts); 1780 1781 mov( -1, Rresult); 1782 movcc(equal, false, icc, 0, Rresult); 1783 movcc(greaterUnsigned, false, icc, 1, Rresult); 1784 1785 bind(done); 1786 } 1787 1788 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { 1789 subcc( G0, Rlow, Rlow ); 1790 subc( G0, Rhi, Rhi ); 1791 } 1792 1793 void MacroAssembler::lshl( Register Rin_high, Register Rin_low, 1794 Register Rcount, 1795 Register Rout_high, Register Rout_low, 1796 Register Rtemp ) { 1797 1798 1799 Register Ralt_count = Rtemp; 1800 Register Rxfer_bits = Rtemp; 1801 1802 assert( Ralt_count != Rin_high 1803 && Ralt_count != Rin_low 1804 && Ralt_count != Rcount 1805 && Rxfer_bits != Rin_low 1806 && Rxfer_bits != Rin_high 1807 && Rxfer_bits != Rcount 1808 && Rxfer_bits != Rout_low 1809 && Rout_low != Rin_high, 1810 "register alias checks"); 1811 1812 Label big_shift, done; 1813 1814 // This code can be optimized to use the 64 bit shifts in V9. 1815 // Here we use the 32 bit shifts. 1816 1817 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1818 subcc(Rcount, 31, Ralt_count); 1819 br(greater, true, pn, big_shift); 1820 delayed()->dec(Ralt_count); 1821 1822 // shift < 32 bits, Ralt_count = Rcount-31 1823 1824 // We get the transfer bits by shifting right by 32-count the low 1825 // register. This is done by shifting right by 31-count and then by one 1826 // more to take care of the special (rare) case where count is zero 1827 // (shifting by 32 would not work). 1828 1829 neg(Ralt_count); 1830 1831 // The order of the next two instructions is critical in the case where 1832 // Rin and Rout are the same and should not be reversed. 1833 1834 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count 1835 if (Rcount != Rout_low) { 1836 sll(Rin_low, Rcount, Rout_low); // low half 1837 } 1838 sll(Rin_high, Rcount, Rout_high); 1839 if (Rcount == Rout_low) { 1840 sll(Rin_low, Rcount, Rout_low); // low half 1841 } 1842 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more 1843 ba(done); 1844 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low 1845 1846 // shift >= 32 bits, Ralt_count = Rcount-32 1847 bind(big_shift); 1848 sll(Rin_low, Ralt_count, Rout_high ); 1849 clr(Rout_low); 1850 1851 bind(done); 1852 } 1853 1854 1855 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, 1856 Register Rcount, 1857 Register Rout_high, Register Rout_low, 1858 Register Rtemp ) { 1859 1860 Register Ralt_count = Rtemp; 1861 Register Rxfer_bits = Rtemp; 1862 1863 assert( Ralt_count != Rin_high 1864 && Ralt_count != Rin_low 1865 && Ralt_count != Rcount 1866 && Rxfer_bits != Rin_low 1867 && Rxfer_bits != Rin_high 1868 && Rxfer_bits != Rcount 1869 && Rxfer_bits != Rout_high 1870 && Rout_high != Rin_low, 1871 "register alias checks"); 1872 1873 Label big_shift, done; 1874 1875 // This code can be optimized to use the 64 bit shifts in V9. 1876 // Here we use the 32 bit shifts. 
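  // (Reference semantics of this routine, sketched in C; commentary only:
  //    int64_t x = ((int64_t)in_high << 32) | (uint32_t)in_low;
  //    x >>= (count & 63);            // assumes signed >> is arithmetic
  //  Rout_high:Rout_low receive the two halves of x.)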
1877 1878 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1879 subcc(Rcount, 31, Ralt_count); 1880 br(greater, true, pn, big_shift); 1881 delayed()->dec(Ralt_count); 1882 1883 // shift < 32 bits, Ralt_count = Rcount-31 1884 1885 // We get the transfer bits by shifting left by 32-count the high 1886 // register. This is done by shifting left by 31-count and then by one 1887 // more to take care of the special (rare) case where count is zero 1888 // (shifting by 32 would not work). 1889 1890 neg(Ralt_count); 1891 if (Rcount != Rout_low) { 1892 srl(Rin_low, Rcount, Rout_low); 1893 } 1894 1895 // The order of the next two instructions is critical in the case where 1896 // Rin and Rout are the same and should not be reversed. 1897 1898 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1899 sra(Rin_high, Rcount, Rout_high ); // high half 1900 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1901 if (Rcount == Rout_low) { 1902 srl(Rin_low, Rcount, Rout_low); 1903 } 1904 ba(done); 1905 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1906 1907 // shift >= 32 bits, Ralt_count = Rcount-32 1908 bind(big_shift); 1909 1910 sra(Rin_high, Ralt_count, Rout_low); 1911 sra(Rin_high, 31, Rout_high); // sign into hi 1912 1913 bind( done ); 1914 } 1915 1916 1917 1918 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1919 Register Rcount, 1920 Register Rout_high, Register Rout_low, 1921 Register Rtemp ) { 1922 1923 Register Ralt_count = Rtemp; 1924 Register Rxfer_bits = Rtemp; 1925 1926 assert( Ralt_count != Rin_high 1927 && Ralt_count != Rin_low 1928 && Ralt_count != Rcount 1929 && Rxfer_bits != Rin_low 1930 && Rxfer_bits != Rin_high 1931 && Rxfer_bits != Rcount 1932 && Rxfer_bits != Rout_high 1933 && Rout_high != Rin_low, 1934 "register alias checks"); 1935 1936 Label big_shift, done; 1937 1938 // This code can be optimized to use the 64 bit shifts in V9. 1939 // Here we use the 32 bit shifts. 1940 1941 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1942 subcc(Rcount, 31, Ralt_count); 1943 br(greater, true, pn, big_shift); 1944 delayed()->dec(Ralt_count); 1945 1946 // shift < 32 bits, Ralt_count = Rcount-31 1947 1948 // We get the transfer bits by shifting left by 32-count the high 1949 // register. This is done by shifting left by 31-count and then by one 1950 // more to take care of the special (rare) case where count is zero 1951 // (shifting by 32 would not work). 1952 1953 neg(Ralt_count); 1954 if (Rcount != Rout_low) { 1955 srl(Rin_low, Rcount, Rout_low); 1956 } 1957 1958 // The order of the next two instructions is critical in the case where 1959 // Rin and Rout are the same and should not be reversed. 
1960 1961 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1962 srl(Rin_high, Rcount, Rout_high ); // high half 1963 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1964 if (Rcount == Rout_low) { 1965 srl(Rin_low, Rcount, Rout_low); 1966 } 1967 ba(done); 1968 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1969 1970 // shift >= 32 bits, Ralt_count = Rcount-32 1971 bind(big_shift); 1972 1973 srl(Rin_high, Ralt_count, Rout_low); 1974 clr(Rout_high); 1975 1976 bind( done ); 1977 } 1978 1979 #ifdef _LP64 1980 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1981 cmp(Ra, Rb); 1982 mov(-1, Rresult); 1983 movcc(equal, false, xcc, 0, Rresult); 1984 movcc(greater, false, xcc, 1, Rresult); 1985 } 1986 #endif 1987 1988 1989 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1990 switch (size_in_bytes) { 1991 case 8: ld_long(src, dst); break; 1992 case 4: ld( src, dst); break; 1993 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1994 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 1995 default: ShouldNotReachHere(); 1996 } 1997 } 1998 1999 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 2000 switch (size_in_bytes) { 2001 case 8: st_long(src, dst); break; 2002 case 4: st( src, dst); break; 2003 case 2: sth( src, dst); break; 2004 case 1: stb( src, dst); break; 2005 default: ShouldNotReachHere(); 2006 } 2007 } 2008 2009 2010 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 2011 FloatRegister Fa, FloatRegister Fb, 2012 Register Rresult) { 2013 if (is_float) { 2014 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 2015 } else { 2016 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 2017 } 2018 2019 if (unordered_result == 1) { 2020 mov( -1, Rresult); 2021 movcc(f_equal, true, fcc0, 0, Rresult); 2022 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 2023 } else { 2024 mov( -1, Rresult); 2025 movcc(f_equal, true, fcc0, 0, Rresult); 2026 movcc(f_greater, true, fcc0, 1, Rresult); 2027 } 2028 } 2029 2030 2031 void MacroAssembler::save_all_globals_into_locals() { 2032 mov(G1,L1); 2033 mov(G2,L2); 2034 mov(G3,L3); 2035 mov(G4,L4); 2036 mov(G5,L5); 2037 mov(G6,L6); 2038 mov(G7,L7); 2039 } 2040 2041 void MacroAssembler::restore_globals_from_locals() { 2042 mov(L1,G1); 2043 mov(L2,G2); 2044 mov(L3,G3); 2045 mov(L4,G4); 2046 mov(L5,G5); 2047 mov(L6,G6); 2048 mov(L7,G7); 2049 } 2050 2051 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 2052 Register tmp, 2053 int offset) { 2054 intptr_t value = *delayed_value_addr; 2055 if (value != 0) 2056 return RegisterOrConstant(value + offset); 2057 2058 // load indirectly to solve generation ordering problem 2059 AddressLiteral a(delayed_value_addr); 2060 load_ptr_contents(a, tmp); 2061 2062 #ifdef ASSERT 2063 tst(tmp); 2064 breakpoint_trap(zero, xcc); 2065 #endif 2066 2067 if (offset != 0) 2068 add(tmp, offset, tmp); 2069 2070 return RegisterOrConstant(tmp); 2071 } 2072 2073 2074 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2075 assert(d.register_or_noreg() != G0, "lost side effect"); 2076 if ((s2.is_constant() && s2.as_constant() == 0) || 2077 (s2.is_register() && s2.as_register() == G0)) { 2078 // Do nothing, just move value. 
2079 if (s1.is_register()) { 2080 if (d.is_constant()) d = temp; 2081 mov(s1.as_register(), d.as_register()); 2082 return d; 2083 } else { 2084 return s1; 2085 } 2086 } 2087 2088 if (s1.is_register()) { 2089 assert_different_registers(s1.as_register(), temp); 2090 if (d.is_constant()) d = temp; 2091 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2092 return d; 2093 } else { 2094 if (s2.is_register()) { 2095 assert_different_registers(s2.as_register(), temp); 2096 if (d.is_constant()) d = temp; 2097 set(s1.as_constant(), temp); 2098 andn(temp, s2.as_register(), d.as_register()); 2099 return d; 2100 } else { 2101 intptr_t res = s1.as_constant() & ~s2.as_constant(); 2102 return res; 2103 } 2104 } 2105 } 2106 2107 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2108 assert(d.register_or_noreg() != G0, "lost side effect"); 2109 if ((s2.is_constant() && s2.as_constant() == 0) || 2110 (s2.is_register() && s2.as_register() == G0)) { 2111 // Do nothing, just move value. 2112 if (s1.is_register()) { 2113 if (d.is_constant()) d = temp; 2114 mov(s1.as_register(), d.as_register()); 2115 return d; 2116 } else { 2117 return s1; 2118 } 2119 } 2120 2121 if (s1.is_register()) { 2122 assert_different_registers(s1.as_register(), temp); 2123 if (d.is_constant()) d = temp; 2124 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2125 return d; 2126 } else { 2127 if (s2.is_register()) { 2128 assert_different_registers(s2.as_register(), temp); 2129 if (d.is_constant()) d = temp; 2130 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2131 return d; 2132 } else { 2133 intptr_t res = s1.as_constant() + s2.as_constant(); 2134 return res; 2135 } 2136 } 2137 } 2138 2139 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2140 assert(d.register_or_noreg() != G0, "lost side effect"); 2141 if (!is_simm13(s2.constant_or_zero())) 2142 s2 = (s2.as_constant() & 0xFF); 2143 if ((s2.is_constant() && s2.as_constant() == 0) || 2144 (s2.is_register() && s2.as_register() == G0)) { 2145 // Do nothing, just move value. 2146 if (s1.is_register()) { 2147 if (d.is_constant()) d = temp; 2148 mov(s1.as_register(), d.as_register()); 2149 return d; 2150 } else { 2151 return s1; 2152 } 2153 } 2154 2155 if (s1.is_register()) { 2156 assert_different_registers(s1.as_register(), temp); 2157 if (d.is_constant()) d = temp; 2158 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2159 return d; 2160 } else { 2161 if (s2.is_register()) { 2162 assert_different_registers(s2.as_register(), temp); 2163 if (d.is_constant()) d = temp; 2164 set(s1.as_constant(), temp); 2165 sll_ptr(temp, s2.as_register(), d.as_register()); 2166 return d; 2167 } else { 2168 intptr_t res = s1.as_constant() << s2.as_constant(); 2169 return res; 2170 } 2171 } 2172 } 2173 2174 2175 // Look up the method for a megamorphic invokeinterface call. 2176 // The target method is determined by <intf_klass, itable_index>. 2177 // The receiver klass is in recv_klass. 2178 // On success, the result will be in method_result, and execution falls through. 2179 // On failure, execution transfers to the given label. 
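// (Overall shape, sketched in C-like pseudocode; the emitted loop below is
//  peeled once and does part of its work in the delay slots:
//    for (scan = itable_start(recv_klass); scan->interface() != NULL; scan += scan_step)
//      if (scan->interface() == intf_klass)
//        { method_result = *(recv_klass + scan->offset() + scaled_itable_index); break; }
//  scaled_itable_index is an illustrative name for the prescaled index.)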
2180 void MacroAssembler::lookup_interface_method(Register recv_klass, 2181 Register intf_klass, 2182 RegisterOrConstant itable_index, 2183 Register method_result, 2184 Register scan_temp, 2185 Register sethi_temp, 2186 Label& L_no_such_interface) { 2187 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2188 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2189 "caller must use same register for non-constant itable index as for method"); 2190 2191 Label L_no_such_interface_restore; 2192 bool did_save = false; 2193 if (scan_temp == noreg || sethi_temp == noreg) { 2194 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2195 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2196 assert(method_result->is_global(), "must be able to return value"); 2197 scan_temp = L2; 2198 sethi_temp = L3; 2199 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2200 recv_klass = recv_2; 2201 intf_klass = intf_2; 2202 did_save = true; 2203 } 2204 2205 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2206 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 2207 int scan_step = itableOffsetEntry::size() * wordSize; 2208 int vte_size = vtableEntry::size() * wordSize; 2209 2210 lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp); 2211 // %%% We should store the aligned, prescaled offset in the klassoop. 2212 // Then the next several instructions would fold away. 2213 2214 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0); 2215 int itb_offset = vtable_base; 2216 if (round_to_unit != 0) { 2217 // hoist first instruction of round_to(scan_temp, BytesPerLong): 2218 itb_offset += round_to_unit - wordSize; 2219 } 2220 int itb_scale = exact_log2(vtableEntry::size() * wordSize); 2221 sll(scan_temp, itb_scale, scan_temp); 2222 add(scan_temp, itb_offset, scan_temp); 2223 if (round_to_unit != 0) { 2224 // Round up to align_object_offset boundary 2225 // see code for InstanceKlass::start_of_itable! 2226 // Was: round_to(scan_temp, BytesPerLong); 2227 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp); 2228 and3(scan_temp, -round_to_unit, scan_temp); 2229 } 2230 add(recv_klass, scan_temp, scan_temp); 2231 2232 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2233 RegisterOrConstant itable_offset = itable_index; 2234 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2235 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2236 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2237 2238 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2239 // if (scan->interface() == intf) { 2240 // result = (klass + scan->offset() + itable_index); 2241 // } 2242 // } 2243 Label L_search, L_found_method; 2244 2245 for (int peel = 1; peel >= 0; peel--) { 2246 // %%%% Could load both offset and interface in one ldx, if they were 2247 // in the opposite order. This would save a load. 2248 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2249 2250 // Check that this entry is non-null. A null entry means that 2251 // the receiver class doesn't implement the interface, and wasn't the 2252 // same as when the caller was compiled. 2253 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? 
L_no_such_interface_restore : L_no_such_interface); 2254 delayed()->cmp(method_result, intf_klass); 2255 2256 if (peel) { 2257 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2258 } else { 2259 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2260 // (invert the test to fall through to found_method...) 2261 } 2262 delayed()->add(scan_temp, scan_step, scan_temp); 2263 2264 if (!peel) break; 2265 2266 bind(L_search); 2267 } 2268 2269 bind(L_found_method); 2270 2271 // Got a hit. 2272 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2273 // scan_temp[-scan_step] points to the vtable offset we need 2274 ito_offset -= scan_step; 2275 lduw(scan_temp, ito_offset, scan_temp); 2276 ld_ptr(recv_klass, scan_temp, method_result); 2277 2278 if (did_save) { 2279 Label L_done; 2280 ba(L_done); 2281 delayed()->restore(); 2282 2283 bind(L_no_such_interface_restore); 2284 ba(L_no_such_interface); 2285 delayed()->restore(); 2286 2287 bind(L_done); 2288 } 2289 } 2290 2291 2292 // virtual method calling 2293 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2294 RegisterOrConstant vtable_index, 2295 Register method_result) { 2296 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2297 Register sethi_temp = method_result; 2298 const int base = (InstanceKlass::vtable_start_offset() * wordSize + 2299 // method pointer offset within the vtable entry: 2300 vtableEntry::method_offset_in_bytes()); 2301 RegisterOrConstant vtable_offset = vtable_index; 2302 // Each of the following three lines potentially generates an instruction. 2303 // But the total number of address formation instructions will always be 2304 // at most two, and will often be zero. In any case, it will be optimal. 2305 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2306 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2307 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset); 2308 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2309 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2310 ld_ptr(vtable_entry_addr, method_result); 2311 } 2312 2313 2314 void MacroAssembler::check_klass_subtype(Register sub_klass, 2315 Register super_klass, 2316 Register temp_reg, 2317 Register temp2_reg, 2318 Label& L_success) { 2319 Register sub_2 = sub_klass; 2320 Register sup_2 = super_klass; 2321 if (!sub_2->is_global()) sub_2 = L0; 2322 if (!sup_2->is_global()) sup_2 = L1; 2323 bool did_save = false; 2324 if (temp_reg == noreg || temp2_reg == noreg) { 2325 temp_reg = L2; 2326 temp2_reg = L3; 2327 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2328 sub_klass = sub_2; 2329 super_klass = sup_2; 2330 did_save = true; 2331 } 2332 Label L_failure, L_pop_to_failure, L_pop_to_success; 2333 check_klass_subtype_fast_path(sub_klass, super_klass, 2334 temp_reg, temp2_reg, 2335 (did_save ? &L_pop_to_success : &L_success), 2336 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2337 2338 if (!did_save) 2339 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2340 check_klass_subtype_slow_path(sub_2, sup_2, 2341 L2, L3, L4, L5, 2342 NULL, &L_pop_to_failure); 2343 2344 // on success: 2345 bind(L_pop_to_success); 2346 restore(); 2347 ba_short(L_success); 2348 2349 // on failure: 2350 bind(L_pop_to_failure); 2351 restore(); 2352 bind(L_failure); 2353 } 2354 2355 2356 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2357 Register super_klass, 2358 Register temp_reg, 2359 Register temp2_reg, 2360 Label* L_success, 2361 Label* L_failure, 2362 Label* L_slow_path, 2363 RegisterOrConstant super_check_offset) { 2364 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2365 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2366 2367 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2368 bool need_slow_path = (must_load_sco || 2369 super_check_offset.constant_or_zero() == sco_offset); 2370 2371 assert_different_registers(sub_klass, super_klass, temp_reg); 2372 if (super_check_offset.is_register()) { 2373 assert_different_registers(sub_klass, super_klass, temp_reg, 2374 super_check_offset.as_register()); 2375 } else if (must_load_sco) { 2376 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2377 } 2378 2379 Label L_fallthrough; 2380 int label_nulls = 0; 2381 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2382 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2383 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2384 assert(label_nulls <= 1 || 2385 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2386 "at most one NULL in the batch, usually"); 2387 2388 // If the pointers are equal, we are done (e.g., String[] elements). 2389 // This self-check enables sharing of secondary supertype arrays among 2390 // non-primary types such as array-of-interface. Otherwise, each such 2391 // type would need its own customized SSA. 2392 // We move this check to the front of the fast path because many 2393 // type checks are in fact trivially successful in this manner, 2394 // so we get a nicely predicted branch right at the start of the check. 2395 cmp(super_klass, sub_klass); 2396 brx(Assembler::equal, false, Assembler::pn, *L_success); 2397 delayed()->nop(); 2398 2399 // Check the supertype display: 2400 if (must_load_sco) { 2401 // The super check offset is always positive... 2402 lduw(super_klass, sco_offset, temp2_reg); 2403 super_check_offset = RegisterOrConstant(temp2_reg); 2404 // super_check_offset is register. 2405 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2406 } 2407 ld_ptr(sub_klass, super_check_offset, temp_reg); 2408 cmp(super_klass, temp_reg); 2409 2410 // This check has worked decisively for primary supers. 2411 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2412 // (Secondary supers are interfaces and very deeply nested subtypes.) 2413 // This works in the same check above because of a tricky aliasing 2414 // between the super_cache and the primary super display elements. 2415 // (The 'super_check_addr' can address either, as the case requires.) 2416 // Note that the cache is updated below if it does not help us find 2417 // what we need immediately. 2418 // So if it was a primary super, we can just fail immediately. 
2419 // Otherwise, it's the slow path for us (no success at this point). 2420 2421 // Hacked ba(), which may only be used just before L_fallthrough. 2422 #define FINAL_JUMP(label) \ 2423 if (&(label) != &L_fallthrough) { \ 2424 ba(label); delayed()->nop(); \ 2425 } 2426 2427 if (super_check_offset.is_register()) { 2428 brx(Assembler::equal, false, Assembler::pn, *L_success); 2429 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2430 2431 if (L_failure == &L_fallthrough) { 2432 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2433 delayed()->nop(); 2434 } else { 2435 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2436 delayed()->nop(); 2437 FINAL_JUMP(*L_slow_path); 2438 } 2439 } else if (super_check_offset.as_constant() == sc_offset) { 2440 // Need a slow path; fast failure is impossible. 2441 if (L_slow_path == &L_fallthrough) { 2442 brx(Assembler::equal, false, Assembler::pt, *L_success); 2443 delayed()->nop(); 2444 } else { 2445 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2446 delayed()->nop(); 2447 FINAL_JUMP(*L_success); 2448 } 2449 } else { 2450 // No slow path; it's a fast decision. 2451 if (L_failure == &L_fallthrough) { 2452 brx(Assembler::equal, false, Assembler::pt, *L_success); 2453 delayed()->nop(); 2454 } else { 2455 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2456 delayed()->nop(); 2457 FINAL_JUMP(*L_success); 2458 } 2459 } 2460 2461 bind(L_fallthrough); 2462 2463 #undef FINAL_JUMP 2464 } 2465 2466 2467 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2468 Register super_klass, 2469 Register count_temp, 2470 Register scan_temp, 2471 Register scratch_reg, 2472 Register coop_reg, 2473 Label* L_success, 2474 Label* L_failure) { 2475 assert_different_registers(sub_klass, super_klass, 2476 count_temp, scan_temp, scratch_reg, coop_reg); 2477 2478 Label L_fallthrough, L_loop; 2479 int label_nulls = 0; 2480 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2481 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2482 assert(label_nulls <= 1, "at most one NULL in the batch"); 2483 2484 // a couple of useful fields in sub_klass: 2485 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2486 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2487 2488 // Do a linear scan of the secondary super-klass chain. 2489 // This code is rarely used, so simplicity is a virtue here. 2490 2491 #ifndef PRODUCT 2492 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2493 inc_counter((address) pst_counter, count_temp, scan_temp); 2494 #endif 2495 2496 // We will consult the secondary-super array. 2497 ld_ptr(sub_klass, ss_offset, scan_temp); 2498 2499 Register search_key = super_klass; 2500 2501 // Load the array length. (Positive movl does right thing on LP64.) 2502 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2503 2504 // Check for empty secondary super list 2505 tst(count_temp); 2506 2507 // In the array of super classes elements are pointer sized. 2508 int element_size = wordSize; 2509 2510 // Top of search loop 2511 bind(L_loop); 2512 br(Assembler::equal, false, Assembler::pn, *L_failure); 2513 delayed()->add(scan_temp, element_size, scan_temp); 2514 2515 // Skip the array header in all array accesses. 
2516 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2517 elem_offset -= element_size; // the scan pointer was pre-incremented also 2518 2519 // Load next super to check 2520 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2521 2522 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2523 cmp(scratch_reg, search_key); 2524 2525 // A miss means we are NOT a subtype and need to keep looping 2526 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2527 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2528 2529 // Success. Cache the super we found and proceed in triumph. 2530 st_ptr(super_klass, sub_klass, sc_offset); 2531 2532 if (L_success != &L_fallthrough) { 2533 ba(*L_success); 2534 delayed()->nop(); 2535 } 2536 2537 bind(L_fallthrough); 2538 } 2539 2540 2541 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2542 Register temp_reg, 2543 int extra_slot_offset) { 2544 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2545 int stackElementSize = Interpreter::stackElementSize; 2546 int offset = extra_slot_offset * stackElementSize; 2547 if (arg_slot.is_constant()) { 2548 offset += arg_slot.as_constant() * stackElementSize; 2549 return offset; 2550 } else { 2551 assert(temp_reg != noreg, "must specify"); 2552 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2553 if (offset != 0) 2554 add(temp_reg, offset, temp_reg); 2555 return temp_reg; 2556 } 2557 } 2558 2559 2560 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2561 Register temp_reg, 2562 int extra_slot_offset) { 2563 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2564 } 2565 2566 2567 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2568 Register temp_reg, 2569 Label& done, Label* slow_case, 2570 BiasedLockingCounters* counters) { 2571 assert(UseBiasedLocking, "why call this otherwise?"); 2572 2573 if (PrintBiasedLockingStatistics) { 2574 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2575 if (counters == NULL) 2576 counters = BiasedLocking::counters(); 2577 } 2578 2579 Label cas_label; 2580 2581 // Biased locking 2582 // See whether the lock is currently biased toward our thread and 2583 // whether the epoch is still valid 2584 // Note that the runtime guarantees sufficient alignment of JavaThread 2585 // pointers to allow age to be placed into low bits 2586 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2587 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2588 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2589 2590 load_klass(obj_reg, temp_reg); 2591 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2592 or3(G2_thread, temp_reg, temp_reg); 2593 xor3(mark_reg, temp_reg, temp_reg); 2594 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2595 if (counters != NULL) { 2596 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2597 // Reload mark_reg as we may need it later 2598 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2599 } 2600 brx(Assembler::equal, true, Assembler::pt, done); 2601 delayed()->nop(); 2602 2603 Label try_revoke_bias; 2604 Label try_rebias; 2605 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2606 
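  // (Mark word layout relied on here, summarized for reference:
  //  [thread | epoch | age | biased_lock:1 | lock:2], where the low three
  //  bits equal markOopDesc::biased_lock_pattern (0b101) while biased.)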
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go into the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  delayed()->and3(mark_reg,
                  markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
                  mark_reg);
  or3(G2_thread, mark_reg, temp_reg);
  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  cmp(mark_reg, temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
  }
  if (slow_case != NULL) {
    brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
    delayed()->nop();
  }
  ba_short(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
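  // (Rebias step, sketched: build prototype_header | G2_thread and CAS it
  //  against the stale biased mark in mark_reg; on success the bias has been
  //  transferred to the current thread in one atomic step.)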
2667 load_klass(obj_reg, temp_reg); 2668 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2669 or3(G2_thread, temp_reg, temp_reg); 2670 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2671 // If the biasing toward our thread failed, this means that 2672 // another thread succeeded in biasing it toward itself and we 2673 // need to revoke that bias. The revocation will occur in the 2674 // interpreter runtime in the slow case. 2675 cmp(mark_reg, temp_reg); 2676 if (counters != NULL) { 2677 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2678 } 2679 if (slow_case != NULL) { 2680 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2681 delayed()->nop(); 2682 } 2683 ba_short(done); 2684 2685 bind(try_revoke_bias); 2686 // The prototype mark in the klass doesn't have the bias bit set any 2687 // more, indicating that objects of this data type are not supposed 2688 // to be biased any more. We are going to try to reset the mark of 2689 // this object to the prototype value and fall through to the 2690 // CAS-based locking scheme. Note that if our CAS fails, it means 2691 // that another thread raced us for the privilege of revoking the 2692 // bias of this particular object, so it's okay to continue in the 2693 // normal locking code. 2694 // 2695 // FIXME: due to a lack of registers we currently blow away the age 2696 // bits in this situation. Should attempt to preserve them. 2697 load_klass(obj_reg, temp_reg); 2698 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2699 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2700 // Fall through to the normal CAS-based lock, because no matter what 2701 // the result of the above CAS, some thread must have succeeded in 2702 // removing the bias bit from the object's header. 2703 if (counters != NULL) { 2704 cmp(mark_reg, temp_reg); 2705 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2706 } 2707 2708 bind(cas_label); 2709 } 2710 2711 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2712 bool allow_delay_slot_filling) { 2713 // Check for biased locking unlock case, which is a no-op 2714 // Note: we do not have to check the thread ID for two reasons. 2715 // First, the interpreter checks for IllegalMonitorStateException at 2716 // a higher level. Second, if the bias was revoked while we held the 2717 // lock, the object could not be rebiased toward another thread, so 2718 // the bias bit would be clear. 2719 ld_ptr(mark_addr, temp_reg); 2720 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2721 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2722 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2723 delayed(); 2724 if (!allow_delay_slot_filling) { 2725 nop(); 2726 } 2727 } 2728 2729 2730 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2731 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2732 // The code could be tightened up considerably. 2733 // 2734 // box->dhw disposition - post-conditions at DONE_LABEL. 2735 // - Successful inflated lock: box->dhw != 0. 2736 // Any non-zero value suffices. 2737 // Consider G2_thread, rsp, boxReg, or unused_mark() 2738 // - Successful Stack-lock: box->dhw == mark. 2739 // box->dhw must contain the displaced mark word value 2740 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
// The slow-path fast_enter() and slow_enter() operators
// are responsible for setting box->dhw = NonZero (typically ::unused_mark).
// - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - is
// extremely sensitive to the size of the code emitted by compiler_lock_object
// and compiler_unlock_object.  Critically, the key factor is code size, not path
// length.  (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
// effect).


void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
                                          Register Rbox, Register Rscratch,
                                          BiasedLockingCounters* counters,
                                          bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  verify_oop(Roop);
  Label done ;

  if (counters != NULL) {
    inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
  }

  if (EmitSync & 1) {
    mov(3, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    cmp(SP, G0);
    return ;
  }

  if (EmitSync & 2) {

    // Fetch object's markword
    ld_ptr(mark_addr, Rmark);

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
    }

    // Save Rbox in Rscratch to be used for the cas operation
    mov(Rbox, Rscratch);

    // set Rmark to markOop | markOopDesc::unlocked_value
    or3(Rmark, markOopDesc::unlocked_value, Rmark);

    // Initialize the box.  (Must happen before we update the object mark!)
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);

    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    // hence we are done
    cmp(Rmark, Rscratch);
#ifdef _LP64
    sub(Rscratch, STACK_BIAS, Rscratch);
#endif
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);  // pull next instruction into delay slot

    // we did not find an unlocked object so see if this is a recursive case
    // sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    bind (done);
    return ;
  }

  Label Egress ;

  if (EmitSync & 256) {
    Label IsInflated ;

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated
    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }

    // Store mark into displaced mark field in the on-stack basic-lock "box"
    // Critically, this must happen before the CAS
    // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->

    // Try stack-lock acquisition.
2834 // Beware: the 1st instruction is in a delay slot 2835 mov(Rbox, Rscratch); 2836 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2837 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2838 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2839 cmp(Rmark, Rscratch); 2840 brx(Assembler::equal, false, Assembler::pt, done); 2841 delayed()->sub(Rscratch, SP, Rscratch); 2842 2843 // Stack-lock attempt failed - check for recursive stack-lock. 2844 // See the comments below about how we might remove this case. 2845 #ifdef _LP64 2846 sub(Rscratch, STACK_BIAS, Rscratch); 2847 #endif 2848 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2849 andcc(Rscratch, 0xfffff003, Rscratch); 2850 br(Assembler::always, false, Assembler::pt, done); 2851 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2852 2853 bind(IsInflated); 2854 if (EmitSync & 64) { 2855 // If m->owner != null goto IsLocked 2856 // Pessimistic form: Test-and-CAS vs CAS 2857 // The optimistic form avoids RTS->RTO cache line upgrades. 2858 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); 2859 andcc(Rscratch, Rscratch, G0); 2860 brx(Assembler::notZero, false, Assembler::pn, done); 2861 delayed()->nop(); 2862 // m->owner == null : it's unlocked. 2863 } 2864 2865 // Try to CAS m->owner from null to Self 2866 // Invariant: if we acquire the lock then _recursions should be 0. 2867 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 2868 mov(G2_thread, Rscratch); 2869 cas_ptr(Rmark, G0, Rscratch); 2870 cmp(Rscratch, G0); 2871 // Intentional fall-through into done 2872 } else { 2873 // Aggressively avoid the Store-before-CAS penalty 2874 // Defer the store into box->dhw until after the CAS 2875 Label IsInflated, Recursive ; 2876 2877 // Anticipate CAS -- Avoid RTS->RTO upgrade 2878 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2879 2880 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2881 // Triage: biased, stack-locked, neutral, inflated 2882 2883 if (try_bias) { 2884 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2885 // Invariant: if control reaches this point in the emitted stream 2886 // then Rmark has not been modified. 2887 } 2888 andcc(Rmark, 2, G0); 2889 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2890 delayed()-> // Beware - dangling delay-slot 2891 2892 // Try stack-lock acquisition. 2893 // Transiently install BUSY (0) encoding in the mark word. 2894 // if the CAS of 0 into the mark was successful then we execute: 2895 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2896 // ST obj->mark = box -- overwrite transient 0 value 2897 // This presumes TSO, of course. 2898 2899 mov(0, Rscratch); 2900 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2901 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2902 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2903 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2904 cmp(Rscratch, Rmark); 2905 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2906 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2907 if (counters != NULL) { 2908 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2909 } 2910 ba(done); 2911 delayed()->st_ptr(Rbox, mark_addr); 2912 2913 bind(Recursive); 2914 // Stack-lock attempt failed - check for recursive stack-lock. 
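    // (Recursion test, sketched: Rscratch holds the mark fetched by the failed
    //  CAS; if ((mark - STACK_BIAS) - SP) & 0xfffff003 == 0 the mark points
    //  into our own stack page, i.e. we already hold the stack-lock, and the
    //  zero result is stored as the recursive displaced header.)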
    // Tests show that we can remove the recursive case with no impact
    // on refworkload 0.83.  If we need to reduce the size of the code
    // emitted by compiler_lock_object() the recursive case is a perfect
    // candidate.
    //
    // A more extreme idea is to always inflate on stack-lock recursion.
    // This lets us eliminate the recursive checks in compiler_lock_object
    // and compiler_unlock_object and the (box->dhw == 0) encoding.
    // A brief experiment - requiring changes to synchronizer.cpp and the
    // interpreter - showed a performance *increase*.  In the same experiment
    // I eliminated the fast-path stack-lock code from the interpreter and
    // always passed control to the "slow" operators in synchronizer.cpp.

    // RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
    sub(Rscratch, STACK_BIAS, Rscratch);
#endif
    sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    if (counters != NULL) {
      // Accounting needs the Rscratch register
      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
      ba_short(done);
    } else {
      ba(done);
      delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    }

    bind (IsInflated);
    if (EmitSync & 64) {
      // If m->owner != null goto IsLocked
      // Test-and-CAS vs CAS
      // Pessimistic form avoids futile (doomed) CAS attempts
      // The optimistic form avoids RTS->RTO cache line upgrades.
      ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::notZero, false, Assembler::pn, done);
      delayed()->nop();
      // m->owner == null : it's unlocked.
    }

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // ST box->displaced_header = NonZero.
    // Any non-zero value suffices:
    //   unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
    st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
    // Intentional fall-through into done
  }

  bind (done);
}

void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
                                            Register Rbox, Register Rscratch,
                                            bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  Label done ;

  if (EmitSync & 4) {
    cmp(SP, G0);
    return ;
  }

  if (EmitSync & 8) {
    if (try_bias) {
      biased_locking_exit(mark_addr, Rscratch, done);
    }

    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    br_null_short(Rmark, Assembler::pt, done);

    // Check if it is still a lightweight lock; this is true if we see
    // the stack address of the basicLock in the markOop of the object
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rbox, Rmark);
    ba(done);
    delayed()->cmp(Rbox, Rmark);
    bind(done);
    return ;
  }

  // Beware ... If the aggregate size of the code emitted by CLO and CUO
  // is too large, performance rolls abruptly off a cliff.
  // This could be related to inlining policies, code cache management, or
  // I$ effects.
  Label LStacked ;

  if (try_bias) {
    // TODO: eliminate redundant LDs of obj->mark
    biased_locking_exit(mark_addr, Rscratch, done);
  }

  ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  andcc(Rscratch, Rscratch, G0);
  brx(Assembler::zero, false, Assembler::pn, done);
  delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
  andcc(Rmark, 2, G0);
  brx(Assembler::zero, false, Assembler::pt, LStacked);
  delayed()->nop();

  // It's inflated
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into _owner which releases the lock.  This prevents loads
  // and stores within the critical section from reordering (floating)
  // past the store that releases the lock.  But TSO is a strong memory model
  // and that particular flavor of barrier is a noop, so we can safely elide it.
  // Note that we use 1-0 locking by default for the inflated case.  We
  // close the resultant (and rare) race by having contended threads in
  // monitorenter periodically poll _owner.
  ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
  ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
  xor3(Rscratch, G2_thread, Rscratch);
  orcc(Rbox, Rscratch, Rbox);
  brx(Assembler::notZero, false, Assembler::pn, done);
  delayed()->
  ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
  ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
  orcc(Rbox, Rscratch, G0);
  if (EmitSync & 65536) {
    Label LSucc ;
    brx(Assembler::notZero, false, Assembler::pn, LSucc);
    delayed()->nop();
    ba(done);
    delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);

    bind(LSucc);
    st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
    if (os::is_MP()) { membar (StoreLoad); }
    ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    // invert icc.zf and goto done
    br_notnull(Rscratch, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    ba(done);
    delayed()->cmp(G0, 1);
  } else {
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
    ba(done);
    delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
  }

  bind (LStacked);
  // Consider: we could replace the expensive CAS in the exit
  // path with a simple ST of the displaced mark value fetched from
  // the on-stack basiclock box.  That admits a race where a thread T2
  // in the slow lock path -- inflating with monitor M -- could race a
  // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  // More precisely T1 in the stack-lock unlock path could "stomp" the
  // inflated mark value M installed by T2, resulting in an orphan
  // object monitor M and T2 becoming stranded.
We can remedy that situation 3081 // by having T2 periodically poll the object's mark word using timed wait 3082 // operations. If T2 discovers that a stomp has occurred it vacates 3083 // the monitor M and wakes any other threads stranded on the now-orphan M. 3084 // In addition the monitor scavenger, which performs deflation, 3085 // would also need to check for orpan monitors and stranded threads. 3086 // 3087 // Finally, inflation is also used when T2 needs to assign a hashCode 3088 // to O and O is stack-locked by T1. The "stomp" race could cause 3089 // an assigned hashCode value to be lost. We can avoid that condition 3090 // and provide the necessary hashCode stability invariants by ensuring 3091 // that hashCode generation is idempotent between copying GCs. 3092 // For example we could compute the hashCode of an object O as 3093 // O's heap address XOR some high quality RNG value that is refreshed 3094 // at GC-time. The monitor scavenger would install the hashCode 3095 // found in any orphan monitors. Again, the mechanism admits a 3096 // lost-update "stomp" WAW race but detects and recovers as needed. 3097 // 3098 // A prototype implementation showed excellent results, although 3099 // the scavenger and timeout code was rather involved. 3100 3101 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3102 cmp(Rbox, Rscratch); 3103 // Intentional fall through into done ... 3104 3105 bind(done); 3106 } 3107 3108 3109 3110 void MacroAssembler::print_CPU_state() { 3111 // %%%%% need to implement this 3112 } 3113 3114 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3115 // %%%%% need to implement this 3116 } 3117 3118 void MacroAssembler::push_IU_state() { 3119 // %%%%% need to implement this 3120 } 3121 3122 3123 void MacroAssembler::pop_IU_state() { 3124 // %%%%% need to implement this 3125 } 3126 3127 3128 void MacroAssembler::push_FPU_state() { 3129 // %%%%% need to implement this 3130 } 3131 3132 3133 void MacroAssembler::pop_FPU_state() { 3134 // %%%%% need to implement this 3135 } 3136 3137 3138 void MacroAssembler::push_CPU_state() { 3139 // %%%%% need to implement this 3140 } 3141 3142 3143 void MacroAssembler::pop_CPU_state() { 3144 // %%%%% need to implement this 3145 } 3146 3147 3148 3149 void MacroAssembler::verify_tlab() { 3150 #ifdef ASSERT 3151 if (UseTLAB && VerifyOops) { 3152 Label next, next2, ok; 3153 Register t1 = L0; 3154 Register t2 = L1; 3155 Register t3 = L2; 3156 3157 save_frame(0); 3158 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3159 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3160 or3(t1, t2, t3); 3161 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3162 STOP("assert(top >= start)"); 3163 should_not_reach_here(); 3164 3165 bind(next); 3166 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3167 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3168 or3(t3, t2, t3); 3169 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3170 STOP("assert(top <= end)"); 3171 should_not_reach_here(); 3172 3173 bind(next2); 3174 and3(t3, MinObjAlignmentInBytesMask, t3); 3175 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3176 STOP("assert(aligned)"); 3177 should_not_reach_here(); 3178 3179 bind(ok); 3180 restore(); 3181 } 3182 #endif 3183 } 3184 3185 3186 void MacroAssembler::eden_allocate( 3187 Register obj, // result: pointer to object after successful allocation 3188 Register var_size_in_bytes, // object size in bytes if unknown at 
compile time; invalid otherwise 3189 int con_size_in_bytes, // object size in bytes if known at compile time 3190 Register t1, // temp register 3191 Register t2, // temp register 3192 Label& slow_case // continuation point if fast allocation fails 3193 ){ 3194 // make sure arguments make sense 3195 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3196 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3197 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3198 3199 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 3200 // No allocation in the shared eden. 3201 ba(slow_case); 3202 delayed()->nop(); 3203 } else { 3204 // get eden boundaries 3205 // note: we need both top & top_addr! 3206 const Register top_addr = t1; 3207 const Register end = t2; 3208 3209 CollectedHeap* ch = Universe::heap(); 3210 set((intx)ch->top_addr(), top_addr); 3211 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3212 ld_ptr(top_addr, delta, end); 3213 ld_ptr(top_addr, 0, obj); 3214 3215 // try to allocate 3216 Label retry; 3217 bind(retry); 3218 #ifdef ASSERT 3219 // make sure eden top is properly aligned 3220 { 3221 Label L; 3222 btst(MinObjAlignmentInBytesMask, obj); 3223 br(Assembler::zero, false, Assembler::pt, L); 3224 delayed()->nop(); 3225 STOP("eden top is not properly aligned"); 3226 bind(L); 3227 } 3228 #endif // ASSERT 3229 const Register free = end; 3230 sub(end, obj, free); // compute amount of free space 3231 if (var_size_in_bytes->is_valid()) { 3232 // size is unknown at compile time 3233 cmp(free, var_size_in_bytes); 3234 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3235 delayed()->add(obj, var_size_in_bytes, end); 3236 } else { 3237 // size is known at compile time 3238 cmp(free, con_size_in_bytes); 3239 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3240 delayed()->add(obj, con_size_in_bytes, end); 3241 } 3242 // Compare obj with the value at top_addr; if still equal, swap the value of 3243 // end with the value at top_addr. If not equal, read the value at top_addr 3244 // into end. 
3245 cas_ptr(top_addr, obj, end); 3246 // if someone beat us on the allocation, try again, otherwise continue 3247 cmp(obj, end); 3248 brx(Assembler::notEqual, false, Assembler::pn, retry); 3249 delayed()->mov(end, obj); // nop if successfull since obj == end 3250 3251 #ifdef ASSERT 3252 // make sure eden top is properly aligned 3253 { 3254 Label L; 3255 const Register top_addr = t1; 3256 3257 set((intx)ch->top_addr(), top_addr); 3258 ld_ptr(top_addr, 0, top_addr); 3259 btst(MinObjAlignmentInBytesMask, top_addr); 3260 br(Assembler::zero, false, Assembler::pt, L); 3261 delayed()->nop(); 3262 STOP("eden top is not properly aligned"); 3263 bind(L); 3264 } 3265 #endif // ASSERT 3266 } 3267 } 3268 3269 3270 void MacroAssembler::tlab_allocate( 3271 Register obj, // result: pointer to object after successful allocation 3272 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3273 int con_size_in_bytes, // object size in bytes if known at compile time 3274 Register t1, // temp register 3275 Label& slow_case // continuation point if fast allocation fails 3276 ){ 3277 // make sure arguments make sense 3278 assert_different_registers(obj, var_size_in_bytes, t1); 3279 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3280 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3281 3282 const Register free = t1; 3283 3284 verify_tlab(); 3285 3286 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3287 3288 // calculate amount of free space 3289 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3290 sub(free, obj, free); 3291 3292 Label done; 3293 if (var_size_in_bytes == noreg) { 3294 cmp(free, con_size_in_bytes); 3295 } else { 3296 cmp(free, var_size_in_bytes); 3297 } 3298 br(Assembler::less, false, Assembler::pn, slow_case); 3299 // calculate the new top pointer 3300 if (var_size_in_bytes == noreg) { 3301 delayed()->add(obj, con_size_in_bytes, free); 3302 } else { 3303 delayed()->add(obj, var_size_in_bytes, free); 3304 } 3305 3306 bind(done); 3307 3308 #ifdef ASSERT 3309 // make sure new free pointer is properly aligned 3310 { 3311 Label L; 3312 btst(MinObjAlignmentInBytesMask, free); 3313 br(Assembler::zero, false, Assembler::pt, L); 3314 delayed()->nop(); 3315 STOP("updated TLAB free is not properly aligned"); 3316 bind(L); 3317 } 3318 #endif // ASSERT 3319 3320 // update the tlab top pointer 3321 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3322 verify_tlab(); 3323 } 3324 3325 3326 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { 3327 Register top = O0; 3328 Register t1 = G1; 3329 Register t2 = G3; 3330 Register t3 = O1; 3331 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); 3332 Label do_refill, discard_tlab; 3333 3334 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 3335 // No allocation in the shared eden. 3336 ba(slow_case); 3337 delayed()->nop(); 3338 } 3339 3340 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); 3341 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); 3342 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); 3343 3344 // calculate amount of free space 3345 sub(t1, top, t1); 3346 srl_ptr(t1, LogHeapWordSize, t1); 3347 3348 // Retain tlab and allocate object in shared space if 3349 // the amount free in the tlab is too large to discard. 
  cmp(t1, t2);
  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);

  // increment waste limit to prevent getting stuck on this slow path
  delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  ba(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null_short(top, Assembler::pn, do_refill);

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  // Store klass last: concurrent GCs assume the length is valid if
  // the klass field is not null.
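  // The dead space is thus made to look like a plausible int[]: a mark word
  // with a fixed hash, a length covering the rest of the tlab, and
  // intArrayKlass as the klass, so the heap stays parsable for concurrent
  // collectors scanning it.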
  store_klass(t2, top);
  verify_oop(top);

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  sub(top, t1, t1); // size of tlab's allocated portion
  incr_allocated_bytes(t1, t2, t3);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
  verify_tlab();
  ba(retry);
  delayed()->nop();
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}
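// The counter helpers below form a 32-bit address with the classic SPARC
// sethi/low10 split; the generated sequence is roughly:
//
//   sethi %hi(counter_addr), Rtmp1
//   ld    [Rtmp1 + %lo(counter_addr)], Rtmp2
//   inc   Rtmp2
//   st    Rtmp2, [Rtmp1 + %lo(counter_addr)]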
void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}


// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages.  This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Use stack pointer in temp stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  for (int i = 0; i < StackShadowPages-1; i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;

static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;

static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?

static void generate_satb_log_enqueue(bool with_frame) {
  BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);

#define __ masm.

  address start = __ pc();
  Register pre_val;

  Label refill, restart;
  if (with_frame) {
    __ save_frame(0);
    pre_val = I0;  // Was O0 before the save.
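    // save_frame() rotates the SPARC register window: the caller's out
    // registers become this frame's in registers, so the argument that
    // arrived in O0 is now read as I0.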
  } else {
    pre_val = O0;
  }

  int satb_q_index_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             PtrQueue::byte_offset_of_index());

  int satb_q_buf_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             PtrQueue::byte_offset_of_buf());

  assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
         in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
         "check sizes in assembly below");

  __ bind(restart);

  // Load the index into the SATB buffer. PtrQueue::_index is a size_t
  // so ld_ptr is appropriate.
  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
  if (!with_frame) {
    // Use return-from-leaf
    __ retl();
    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  } else {
    // Not delayed.
    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  }
  if (with_frame) {
    __ ret();
    __ delayed()->restore();
  }
  __ bind(refill);

  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &SATBMarkQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L0);
  __ mov(G3_scratch, L1);
  __ mov(G4, L2);
  // We need the value of O0 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O0, L3);
  // Since the call will overwrite O7, we save and restore that, as well.
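  // (O7 is the return linkage on SPARC: a call writes its own address into
  // O7 and the callee returns to O7 + 8, so O7 must survive the leaf call.)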
  __ mov(O7, L4);
  __ call_VM_leaf(L5, handle_zero, G2_thread);
  __ mov(L0, G1_scratch);
  __ mov(L1, G3_scratch);
  __ mov(L2, G4);
  __ mov(L3, O0);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  if (with_frame) {
    satb_log_enqueue_with_frame = start;
    satb_log_enqueue_with_frame_end = __ pc();
  } else {
    satb_log_enqueue_frameless = start;
    satb_log_enqueue_frameless_end = __ pc();
  }

#undef __
}

static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
  if (with_frame) {
    if (satb_log_enqueue_with_frame == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
      if (G1SATBPrintStubs) {
        tty->print_cr("Generated with-frame satb enqueue:");
        Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
                             satb_log_enqueue_with_frame_end,
                             tty);
      }
    }
  } else {
    if (satb_log_enqueue_frameless == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_frameless != 0, "postcondition.");
      if (G1SATBPrintStubs) {
        tty->print_cr("Generated frameless satb enqueue:");
        Disassembler::decode((u_char*)satb_log_enqueue_frameless,
                             satb_log_enqueue_frameless_end,
                             tty);
      }
    }
  }
}

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                PtrQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  PtrQueue::byte_offset_of_active()),
         tmp);
  }

  // If marking is inactive, skip the barrier.
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue.  In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg.  In the first case, do a normal call.  In the
  // latter, do a save here and call the frameless version.
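  // Taken together, the filtering above is the usual SATB pre-barrier,
  // roughly:
  //
  //   if (satb_queue.active) {                   // only during concurrent marking
  //     if (obj != noreg) pre_val = *field;      // previous value, if not supplied
  //     if (pre_val != NULL) enqueue(pre_val);   // stub call below
  //   }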

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    generate_satb_log_enqueue_if_necessary(true); // with frame

    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    generate_satb_log_enqueue_if_necessary(false); // frameless

    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}

static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This gets to assume that O0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
  __ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
  __ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0);  // [cardPtr] := 0 (i.e., dirty).

  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             PtrQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             PtrQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. PtrQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so?  Do an experiment...

#undef __

}

static inline void
generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
  if (dirty_card_log_enqueue == 0) {
    generate_dirty_card_log_enqueue(byte_map_base);
    assert(dirty_card_log_enqueue != 0, "postcondition.");
    if (G1SATBPrintStubs) {
      tty->print_cr("Generated dirty_card enqueue:");
      Disassembler::decode((u_char*)dirty_card_log_enqueue,
                           dirty_card_log_enqueue_end,
                           tty);
    }
  }
}


void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {

  Label filtered;
  MacroAssembler* post_filter_masm = this;

  if (new_val == G0) return;

  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::G1SATBCT ||
         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");

  if (G1RSBarrierRegionFilter) {
    xor3(store_addr, new_val, tmp);
#ifdef _LP64
    srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
    srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif

    // XXX Should I predict this taken or not?  Does it matter?
    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
  }

  // If the "store_addr" register is an "in" or "local" register, move it to
  // a scratch reg so we can pass it as an argument.
  bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  // Pick a scratch register different from "tmp".
  Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  // Make sure we use up the delay slot!
  if (use_scr) {
    post_filter_masm->mov(store_addr, scr);
  } else {
    post_filter_masm->nop();
  }
  generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
  save_frame(0);
  call(dirty_card_log_enqueue);
  if (use_scr) {
    delayed()->mov(scr, O0);
  } else {
    delayed()->mov(store_addr->after_save(), O0);
  }
  restore();

  bind(filtered);
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  // If we're writing constant NULL, we can skip the write barrier.
  if (new_val == G0) return;
  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  card_table_write(bs->byte_map_base, tmp, store_addr);
}

void MacroAssembler::load_klass(Register src_oop, Register klass) {
  // The number of bytes in this code is used by
  // MachCallDynamicJavaNode::ret_addr_offset();
  // if this code changes, update that method as well.
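  // With compressed class pointers the klass field holds a 32-bit value;
  // decode_klass_not_null() below reconstructs, in effect,
  //   klass = narrow_klass_base + (narrow_klass << narrow_klass_shift)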
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant())  load_heap_oop(s1, s2.as_constant(), d);
  else                   load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // The sub could be moved before the branch, with the delay slot
    // annulled, but that may add some unneeded work decoding null.
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}
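// The compressed-oop encode/decode pairs below compute, in effect:
//   narrow = (oop - narrow_oop_base) >> LogMinObjAlignmentInBytes
//   oop    = (narrow << LogMinObjAlignmentInBytes) + narrow_oop_base
// with the base term dropped for zero-based heaps, and NULL mapped to 0.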
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL)
    add(dst, G6_heapbase, dst);
}

void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
  assert(r != G6_heapbase, "bad register choice");
  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
  sub(r, G6_heapbase, r);
  if (Universe::narrow_klass_shift() != 0) {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    srlx(r, LogKlassAlignmentInBytes, r);
  }
  reinit_heapbase();
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    set((intptr_t)Universe::narrow_klass_base(), dst);
    sub(src, dst, dst);
    if (Universe::narrow_klass_shift() != 0) {
      srlx(dst, LogKlassAlignmentInBytes, dst);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  // set + add + set
  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
                   insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
  if (Universe::narrow_klass_shift() == 0) {
    return num_instrs * BytesPerInstWord;
  } else { // sllx
    return (num_instrs + 1) * BytesPerInstWord;
  }
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
  assert(r != G6_heapbase, "bad register choice");
  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
  if (Universe::narrow_klass_shift() != 0)
    sllx(r, LogKlassAlignmentInBytes, r);
  add(r, G6_heapbase, r);
  reinit_heapbase();
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    if (Universe::narrow_klass_shift() != 0) {
      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
      sllx(src, LogKlassAlignmentInBytes, dst);
      add(dst, G6_heapbase, dst);
      reinit_heapbase();
    } else {
      set((intptr_t)Universe::narrow_klass_base(), dst);
      add(src, dst, dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}

// Compare char[] arrays aligned to 4 bytes.
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                        Register limit, Register result,
                                        Register chr1, Register chr2, Label& Ldone) {
  Label Lvector, Lloop;
  assert(chr1 == result, "should be the same");

  // Note: limit contains number of bytes (2*char_elements) != 0.
  andcc(limit, 0x2, chr1); // trailing character ?
  br(Assembler::zero, false, Assembler::pt, Lvector);
  delayed()->nop();

  // compare the trailing char
  sub(limit, sizeof(jchar), limit);
  lduh(ary1, limit, chr1);
  lduh(ary2, limit, chr2);
  cmp(chr1, chr2);
  br(Assembler::notEqual, true, Assembler::pt, Ldone);
  delayed()->mov(G0, result);     // not equal

  // only one char ?
  cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
  delayed()->add(G0, 1, result); // zero-length arrays are equal

  // word by word compare, don't need alignment check
  bind(Lvector);
  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  lduw(ary1, limit, chr1);
  bind(Lloop);
  lduw(ary2, limit, chr2);
  cmp(chr1, chr2);
  br(Assembler::notEqual, true, Assembler::pt, Ldone);
  delayed()->mov(G0, result);     // not equal
  inccc(limit, 2*sizeof(jchar));
  // annul LDUW if branch is not taken to prevent access past end of array
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduw(ary1, limit, chr1); // hoisted

  // Caller should set it:
  // add(G0, 1, result); // equals
}

// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note: count is 64-bit in a 64-bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
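  // BIS (stxa with ASI_ST_BLKINIT_PRIMARY below) initializes a whole cache
  // line without first fetching it from memory, so it only pays off for
  // large blocks, and it requires a membar before the zeroed memory is read.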
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three cache lines (32 bytes each).

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two cache lines (32 bytes each).

  // BIS should not be used to zero the tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}
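// In rough C-style pseudocode, bis_zeroing() above does:
//
//   if (count <= 0) return;                     // Ldone
//   if (count < block_zero_size) goto small;    // plain 8-byte stores only
//   zero forward to the next cache-line boundary with stx;
//   while (to < end - 2*cache_line_size + 8)
//     BIS-zero one cache line at a time;
//   membar(StoreLoad);                          // order the BIS stores
//   small: while (to < end) { *to = 0; to += 8; }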