/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will
// work out OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
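// Illustrative note (not from the original sources): patched_branch() splices
// a new displacement into an existing branch word as
//
//   patched = (inst & ~m) | v;
//
// where wdisp(word_aligned_ones, 0, nbits) turns the all-ones word-aligned
// pattern into a mask m covering exactly the nbits-wide displacement field,
// so every other field of the instruction word is preserved.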
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22); break;
      case br_op2:     r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret(bool trace) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(I7, 2 * BytesPerInstWord, G0);
  }
}

void MacroAssembler::retl(bool trace) {
  if (trace) {
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(O7, 2 * BytesPerInstWord, G0);
  }
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}
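// Illustrative note: each TraceJumps ring-buffer entry written by the jmp*
// helpers above is four words -- { jump target, nearby pc, file, line } --
// which is why the ring index is scaled with
// sll(O0, exact_log2(4*sizeof(intptr_t)), O2) and wrapped with
// and3(O0, JavaThread::jump_ring_buffer_size - 1, O0).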
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  } else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}
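// A note on the register shuffling in get_thread() above and verify_thread()
// below: a SPARC 'save' rotates only the in/out/local windows, so live global
// registers (G1, G3, G4, G5_method) have to be copied into locals by hand
// around the C call and restored afterwards.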
void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1, 32, L0);
    srlx(G4, 32, L6);
#endif
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0, 32, G2);           // Move old high G1 bits high in G2
    srl(G1, 0, G1);             // Clear current high G1 bits
    or3(G1, G2, G1);            // Recover 64-bit G1
    sllx(L6, 32, G2);           // Move old high G4 bits high in G2
    srl(G4, 0, G4);             // Clear current high G4 bits
    or3(G4, G2, G4);            // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible, has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here, so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if we
  // are doing a call to native (not VM) that we capture the known pc and
  // don't have to rely on the native call having a standard frame linkage
  // where we can find the pc.
  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}
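// Illustrative usage (hypothetical runtime entry; not from the original
// sources): a caller loads the Java arguments into O1..O3 and leaves O0 free
// for the thread, e.g.
//
//   __ call_VM(O0_result, CAST_FROM_FN_PTR(address, some_runtime_entry), O1_arg);
//
// where some_runtime_entry and the register aliases are placeholders.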
// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}
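// Illustrative note: vm_result acts as a per-thread mailbox. Runtime C code
// stores an oop into JavaThread::vm_result; get_vm_result() above loads it,
// immediately clears the field so a stale oop cannot survive past a GC point,
// and then verifies the loaded value. get_vm_result_2() does the same for
// metadata, which needs no oop verification.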
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

#ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
#endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  } else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  } else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      } else {
        shiftcnt = 12;
      }
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      } else {
        shiftcnt = 10;
      }
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    } else {
      sllx(d, 32, d);
    }
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
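// Illustrative note: internal_set() above picks the cheapest sequence for a
// non-relocatable constant:
//
//   -4096 <= value <= 4095           -> single or3(G0, value, d)
//   inv_hi22(hi22(value)) == value   -> single sethi
//   otherwise                        -> sethi sequence + add of the low 10 bits
//
// insts_for_internal_set() mirrors exactly this decision tree so that size
// estimates stay in sync with the emitted code.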
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  } else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  } else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  } else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  } else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);     // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  } else if (hi == -1) {
    count += 2;
  } else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  } else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1)  ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue appropriate save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
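// Worked example for total_frame_size_in_bytes() above (assuming,
// hypothetically, a 16-word frame::memory_parameter_word_sp_offset): with
// extraWords == 3, nWords = 16 + 3 = 19, which is odd and so rounds up to 20,
// giving a total of 20 * BytesPerWord bytes.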
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
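// Illustrative note: set_narrow_oop() above intentionally emits the sethi
// with the all-ones placeholder hi22(0x3fffff) and an add of 0x3ff instead of
// the real value; the special-format relocation (relocate(rspec, 1)) lets the
// patching code locate and rewrite both instructions once the final narrow
// oop is known. set_narrow_klass() below uses the same scheme.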
void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}
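// Note: push_fTOS()/pop_fTOS() above are empty shims. SPARC has a flat
// floating-point register file rather than an x87-style FPU stack, so shared
// code expecting a "float TOS" presumably has nothing to spill here, which
// would explain why these were never implemented.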
void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash
// ANY registers, including flags.  May not use a register 'save', as this
// blows the high bits of the O-regs if they contain Long values.  Acts as
// a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null.
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}
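// Illustrative note on the debugging path: stop() saves a
// RegistersForDebugging area, puts the message pointer in O1, and jumps to
// the shared stop-subroutine stub, which lands in stop_subroutine() below and
// ultimately calls MacroAssembler::debug() with the message and the
// saved-register block.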
void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame(); // one more save to free up another O7 register
  mov(I0, O1);  // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1.
  mov(I1, O0);  // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to make locks work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  } else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras); // if neg, clear reg
  delayed()->set(0, Rresult);        // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32-bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}
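// Illustrative note: the *_short branch helpers below prefer a single cbcond
// (compare-and-branch, no delay slot) whenever the label is close enough
// (use_cbcond(L)) and, for the immediate forms, the constant fits in 5 bits
// (is_simm(simm13a, 5)); otherwise they fall back to cmp + branch with a nop
// filling the delay slot.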
// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_z, a, p, s1, L);
#else
  tst(s1);
  br(zero, a, p, L);
#endif
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_nz, a, p, s1, L);
#else
  tst(s1);
  br(notZero, a, p, L);
#endif
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32-bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32-bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
    return;
  }
  br(always, false, pt, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter
1776 1777 // Other notes: The first move in each triplet can be unconditional 1778 // (and therefore probably prefetchable). 1779 // And the equals case for the high part does not need testing, 1780 // since that triplet is reached only after finding the high halves differ. 1781 1782 mov(-1, Rresult); 1783 ba(done); 1784 delayed()->movcc(greater, false, icc, 1, Rresult); 1785 1786 bind(check_low_parts); 1787 1788 mov( -1, Rresult); 1789 movcc(equal, false, icc, 0, Rresult); 1790 movcc(greaterUnsigned, false, icc, 1, Rresult); 1791 1792 bind(done); 1793 } 1794 1795 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { 1796 subcc( G0, Rlow, Rlow ); 1797 subc( G0, Rhi, Rhi ); 1798 } 1799 1800 void MacroAssembler::lshl( Register Rin_high, Register Rin_low, 1801 Register Rcount, 1802 Register Rout_high, Register Rout_low, 1803 Register Rtemp ) { 1804 1805 1806 Register Ralt_count = Rtemp; 1807 Register Rxfer_bits = Rtemp; 1808 1809 assert( Ralt_count != Rin_high 1810 && Ralt_count != Rin_low 1811 && Ralt_count != Rcount 1812 && Rxfer_bits != Rin_low 1813 && Rxfer_bits != Rin_high 1814 && Rxfer_bits != Rcount 1815 && Rxfer_bits != Rout_low 1816 && Rout_low != Rin_high, 1817 "register alias checks"); 1818 1819 Label big_shift, done; 1820 1821 // This code can be optimized to use the 64 bit shifts in V9. 1822 // Here we use the 32 bit shifts. 1823 1824 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1825 subcc(Rcount, 31, Ralt_count); 1826 br(greater, true, pn, big_shift); 1827 delayed()->dec(Ralt_count); 1828 1829 // shift < 32 bits, Ralt_count = Rcount-31 1830 1831 // We get the transfer bits by shifting right by 32-count the low 1832 // register. This is done by shifting right by 31-count and then by one 1833 // more to take care of the special (rare) case where count is zero 1834 // (shifting by 32 would not work). 1835 1836 neg(Ralt_count); 1837 1838 // The order of the next two instructions is critical in the case where 1839 // Rin and Rout are the same and should not be reversed. 1840 1841 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count 1842 if (Rcount != Rout_low) { 1843 sll(Rin_low, Rcount, Rout_low); // low half 1844 } 1845 sll(Rin_high, Rcount, Rout_high); 1846 if (Rcount == Rout_low) { 1847 sll(Rin_low, Rcount, Rout_low); // low half 1848 } 1849 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more 1850 ba(done); 1851 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low 1852 1853 // shift >= 32 bits, Ralt_count = Rcount-32 1854 bind(big_shift); 1855 sll(Rin_low, Ralt_count, Rout_high ); 1856 clr(Rout_low); 1857 1858 bind(done); 1859 } 1860 1861 1862 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, 1863 Register Rcount, 1864 Register Rout_high, Register Rout_low, 1865 Register Rtemp ) { 1866 1867 Register Ralt_count = Rtemp; 1868 Register Rxfer_bits = Rtemp; 1869 1870 assert( Ralt_count != Rin_high 1871 && Ralt_count != Rin_low 1872 && Ralt_count != Rcount 1873 && Rxfer_bits != Rin_low 1874 && Rxfer_bits != Rin_high 1875 && Rxfer_bits != Rcount 1876 && Rxfer_bits != Rout_high 1877 && Rout_high != Rin_low, 1878 "register alias checks"); 1879 1880 Label big_shift, done; 1881 1882 // This code can be optimized to use the 64 bit shifts in V9. 1883 // Here we use the 32 bit shifts. 
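// As an illustrative sketch only (plain C, names invented here), for
// 0 < count < 32 the 64-bit arithmetic right shift below decomposes into
// 32-bit halves as:
//   xfer     = high << (32 - count);   // bits crossing the word boundary
//   out_low  = (low >> count) | xfer;  // logical shift of the low half
//   out_high = high >> count;          // arithmetic shift keeps the sign
// and for count >= 32:
//   out_low  = high >> (count - 32);   // arithmetic
//   out_high = high >> 31;             // pure sign fill
// Since shifting by 32 would not work, 32-count is realized below as a
// shift by 31-count followed by one extra shift of 1.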
1884 1885 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1886 subcc(Rcount, 31, Ralt_count); 1887 br(greater, true, pn, big_shift); 1888 delayed()->dec(Ralt_count); 1889 1890 // shift < 32 bits, Ralt_count = Rcount-31 1891 1892 // We get the transfer bits by shifting left by 32-count the high 1893 // register. This is done by shifting left by 31-count and then by one 1894 // more to take care of the special (rare) case where count is zero 1895 // (shifting by 32 would not work). 1896 1897 neg(Ralt_count); 1898 if (Rcount != Rout_low) { 1899 srl(Rin_low, Rcount, Rout_low); 1900 } 1901 1902 // The order of the next two instructions is critical in the case where 1903 // Rin and Rout are the same and should not be reversed. 1904 1905 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1906 sra(Rin_high, Rcount, Rout_high ); // high half 1907 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1908 if (Rcount == Rout_low) { 1909 srl(Rin_low, Rcount, Rout_low); 1910 } 1911 ba(done); 1912 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1913 1914 // shift >= 32 bits, Ralt_count = Rcount-32 1915 bind(big_shift); 1916 1917 sra(Rin_high, Ralt_count, Rout_low); 1918 sra(Rin_high, 31, Rout_high); // sign into hi 1919 1920 bind( done ); 1921 } 1922 1923 1924 1925 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1926 Register Rcount, 1927 Register Rout_high, Register Rout_low, 1928 Register Rtemp ) { 1929 1930 Register Ralt_count = Rtemp; 1931 Register Rxfer_bits = Rtemp; 1932 1933 assert( Ralt_count != Rin_high 1934 && Ralt_count != Rin_low 1935 && Ralt_count != Rcount 1936 && Rxfer_bits != Rin_low 1937 && Rxfer_bits != Rin_high 1938 && Rxfer_bits != Rcount 1939 && Rxfer_bits != Rout_high 1940 && Rout_high != Rin_low, 1941 "register alias checks"); 1942 1943 Label big_shift, done; 1944 1945 // This code can be optimized to use the 64 bit shifts in V9. 1946 // Here we use the 32 bit shifts. 1947 1948 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1949 subcc(Rcount, 31, Ralt_count); 1950 br(greater, true, pn, big_shift); 1951 delayed()->dec(Ralt_count); 1952 1953 // shift < 32 bits, Ralt_count = Rcount-31 1954 1955 // We get the transfer bits by shifting left by 32-count the high 1956 // register. This is done by shifting left by 31-count and then by one 1957 // more to take care of the special (rare) case where count is zero 1958 // (shifting by 32 would not work). 1959 1960 neg(Ralt_count); 1961 if (Rcount != Rout_low) { 1962 srl(Rin_low, Rcount, Rout_low); 1963 } 1964 1965 // The order of the next two instructions is critical in the case where 1966 // Rin and Rout are the same and should not be reversed. 
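// (Concretely: if Rout_high aliases Rin_high, emitting the srl of the high
// word first would destroy the bits that the sll still needs to capture
// into Rxfer_bits.)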
1967 1968 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1969 srl(Rin_high, Rcount, Rout_high ); // high half 1970 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1971 if (Rcount == Rout_low) { 1972 srl(Rin_low, Rcount, Rout_low); 1973 } 1974 ba(done); 1975 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1976 1977 // shift >= 32 bits, Ralt_count = Rcount-32 1978 bind(big_shift); 1979 1980 srl(Rin_high, Ralt_count, Rout_low); 1981 clr(Rout_high); 1982 1983 bind( done ); 1984 } 1985 1986 #ifdef _LP64 1987 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1988 cmp(Ra, Rb); 1989 mov(-1, Rresult); 1990 movcc(equal, false, xcc, 0, Rresult); 1991 movcc(greater, false, xcc, 1, Rresult); 1992 } 1993 #endif 1994 1995 1996 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1997 switch (size_in_bytes) { 1998 case 8: ld_long(src, dst); break; 1999 case 4: ld( src, dst); break; 2000 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 2001 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 2002 default: ShouldNotReachHere(); 2003 } 2004 } 2005 2006 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 2007 switch (size_in_bytes) { 2008 case 8: st_long(src, dst); break; 2009 case 4: st( src, dst); break; 2010 case 2: sth( src, dst); break; 2011 case 1: stb( src, dst); break; 2012 default: ShouldNotReachHere(); 2013 } 2014 } 2015 2016 2017 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 2018 FloatRegister Fa, FloatRegister Fb, 2019 Register Rresult) { 2020 if (is_float) { 2021 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 2022 } else { 2023 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 2024 } 2025 2026 if (unordered_result == 1) { 2027 mov( -1, Rresult); 2028 movcc(f_equal, true, fcc0, 0, Rresult); 2029 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 2030 } else { 2031 mov( -1, Rresult); 2032 movcc(f_equal, true, fcc0, 0, Rresult); 2033 movcc(f_greater, true, fcc0, 1, Rresult); 2034 } 2035 } 2036 2037 2038 void MacroAssembler::save_all_globals_into_locals() { 2039 mov(G1,L1); 2040 mov(G2,L2); 2041 mov(G3,L3); 2042 mov(G4,L4); 2043 mov(G5,L5); 2044 mov(G6,L6); 2045 mov(G7,L7); 2046 } 2047 2048 void MacroAssembler::restore_globals_from_locals() { 2049 mov(L1,G1); 2050 mov(L2,G2); 2051 mov(L3,G3); 2052 mov(L4,G4); 2053 mov(L5,G5); 2054 mov(L6,G6); 2055 mov(L7,G7); 2056 } 2057 2058 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 2059 Register tmp, 2060 int offset) { 2061 intptr_t value = *delayed_value_addr; 2062 if (value != 0) 2063 return RegisterOrConstant(value + offset); 2064 2065 // load indirectly to solve generation ordering problem 2066 AddressLiteral a(delayed_value_addr); 2067 load_ptr_contents(a, tmp); 2068 2069 #ifdef ASSERT 2070 tst(tmp); 2071 breakpoint_trap(zero, xcc); 2072 #endif 2073 2074 if (offset != 0) 2075 add(tmp, offset, tmp); 2076 2077 return RegisterOrConstant(tmp); 2078 } 2079 2080 2081 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2082 assert(d.register_or_noreg() != G0, "lost side effect"); 2083 if ((s2.is_constant() && s2.as_constant() == 0) || 2084 (s2.is_register() && s2.as_register() == G0)) { 2085 // Do nothing, just move value. 
2086 if (s1.is_register()) { 2087 if (d.is_constant()) d = temp; 2088 mov(s1.as_register(), d.as_register()); 2089 return d; 2090 } else { 2091 return s1; 2092 } 2093 } 2094 2095 if (s1.is_register()) { 2096 assert_different_registers(s1.as_register(), temp); 2097 if (d.is_constant()) d = temp; 2098 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2099 return d; 2100 } else { 2101 if (s2.is_register()) { 2102 assert_different_registers(s2.as_register(), temp); 2103 if (d.is_constant()) d = temp; 2104 set(s1.as_constant(), temp); 2105 andn(temp, s2.as_register(), d.as_register()); 2106 return d; 2107 } else { 2108 intptr_t res = s1.as_constant() & ~s2.as_constant(); 2109 return res; 2110 } 2111 } 2112 } 2113 2114 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2115 assert(d.register_or_noreg() != G0, "lost side effect"); 2116 if ((s2.is_constant() && s2.as_constant() == 0) || 2117 (s2.is_register() && s2.as_register() == G0)) { 2118 // Do nothing, just move value. 2119 if (s1.is_register()) { 2120 if (d.is_constant()) d = temp; 2121 mov(s1.as_register(), d.as_register()); 2122 return d; 2123 } else { 2124 return s1; 2125 } 2126 } 2127 2128 if (s1.is_register()) { 2129 assert_different_registers(s1.as_register(), temp); 2130 if (d.is_constant()) d = temp; 2131 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2132 return d; 2133 } else { 2134 if (s2.is_register()) { 2135 assert_different_registers(s2.as_register(), temp); 2136 if (d.is_constant()) d = temp; 2137 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2138 return d; 2139 } else { 2140 intptr_t res = s1.as_constant() + s2.as_constant(); 2141 return res; 2142 } 2143 } 2144 } 2145 2146 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2147 assert(d.register_or_noreg() != G0, "lost side effect"); 2148 if (!is_simm13(s2.constant_or_zero())) 2149 s2 = (s2.as_constant() & 0xFF); 2150 if ((s2.is_constant() && s2.as_constant() == 0) || 2151 (s2.is_register() && s2.as_register() == G0)) { 2152 // Do nothing, just move value. 2153 if (s1.is_register()) { 2154 if (d.is_constant()) d = temp; 2155 mov(s1.as_register(), d.as_register()); 2156 return d; 2157 } else { 2158 return s1; 2159 } 2160 } 2161 2162 if (s1.is_register()) { 2163 assert_different_registers(s1.as_register(), temp); 2164 if (d.is_constant()) d = temp; 2165 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2166 return d; 2167 } else { 2168 if (s2.is_register()) { 2169 assert_different_registers(s2.as_register(), temp); 2170 if (d.is_constant()) d = temp; 2171 set(s1.as_constant(), temp); 2172 sll_ptr(temp, s2.as_register(), d.as_register()); 2173 return d; 2174 } else { 2175 intptr_t res = s1.as_constant() << s2.as_constant(); 2176 return res; 2177 } 2178 } 2179 } 2180 2181 2182 // Look up the method for a megamorphic invokeinterface call. 2183 // The target method is determined by <intf_klass, itable_index>. 2184 // The receiver klass is in recv_klass. 2185 // On success, the result will be in method_result, and execution falls through. 2186 // On failure, execution transfers to the given label. 
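// Rough picture of the data being scanned (illustrative; the actual offsets
// come from the itableOffsetEntry/itableMethodEntry accessors used below):
//   recv_klass:
//     vtable[vtable_length]                    <- scan starts just past this
//     itableOffsetEntry { interface; offset }  <- one per implemented interface,
//     ...                                         list terminated by a NULL interface
//     itableMethodEntry blocks                 <- each 'offset' locates one
//                                                 interface's method block;
//                                                 itable_index selects within it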
2187 void MacroAssembler::lookup_interface_method(Register recv_klass, 2188 Register intf_klass, 2189 RegisterOrConstant itable_index, 2190 Register method_result, 2191 Register scan_temp, 2192 Register sethi_temp, 2193 Label& L_no_such_interface) { 2194 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2195 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2196 "caller must use same register for non-constant itable index as for method"); 2197 2198 Label L_no_such_interface_restore; 2199 bool did_save = false; 2200 if (scan_temp == noreg || sethi_temp == noreg) { 2201 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2202 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2203 assert(method_result->is_global(), "must be able to return value"); 2204 scan_temp = L2; 2205 sethi_temp = L3; 2206 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2207 recv_klass = recv_2; 2208 intf_klass = intf_2; 2209 did_save = true; 2210 } 2211 2212 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2213 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 2214 int scan_step = itableOffsetEntry::size() * wordSize; 2215 int vte_size = vtableEntry::size() * wordSize; 2216 2217 lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp); 2218 // %%% We should store the aligned, prescaled offset in the klassoop. 2219 // Then the next several instructions would fold away. 2220 2221 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0); 2222 int itb_offset = vtable_base; 2223 if (round_to_unit != 0) { 2224 // hoist first instruction of round_to(scan_temp, BytesPerLong): 2225 itb_offset += round_to_unit - wordSize; 2226 } 2227 int itb_scale = exact_log2(vtableEntry::size() * wordSize); 2228 sll(scan_temp, itb_scale, scan_temp); 2229 add(scan_temp, itb_offset, scan_temp); 2230 if (round_to_unit != 0) { 2231 // Round up to align_object_offset boundary 2232 // see code for InstanceKlass::start_of_itable! 2233 // Was: round_to(scan_temp, BytesPerLong); 2234 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp); 2235 and3(scan_temp, -round_to_unit, scan_temp); 2236 } 2237 add(recv_klass, scan_temp, scan_temp); 2238 2239 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2240 RegisterOrConstant itable_offset = itable_index; 2241 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2242 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2243 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2244 2245 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2246 // if (scan->interface() == intf) { 2247 // result = (klass + scan->offset() + itable_index); 2248 // } 2249 // } 2250 Label L_search, L_found_method; 2251 2252 for (int peel = 1; peel >= 0; peel--) { 2253 // %%%% Could load both offset and interface in one ldx, if they were 2254 // in the opposite order. This would save a load. 2255 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2256 2257 // Check that this entry is non-null. A null entry means that 2258 // the receiver class doesn't implement the interface, and wasn't the 2259 // same as when the caller was compiled. 2260 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? 
L_no_such_interface_restore : L_no_such_interface); 2261 delayed()->cmp(method_result, intf_klass); 2262 2263 if (peel) { 2264 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2265 } else { 2266 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2267 // (invert the test to fall through to found_method...) 2268 } 2269 delayed()->add(scan_temp, scan_step, scan_temp); 2270 2271 if (!peel) break; 2272 2273 bind(L_search); 2274 } 2275 2276 bind(L_found_method); 2277 2278 // Got a hit. 2279 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2280 // scan_temp[-scan_step] points to the vtable offset we need 2281 ito_offset -= scan_step; 2282 lduw(scan_temp, ito_offset, scan_temp); 2283 ld_ptr(recv_klass, scan_temp, method_result); 2284 2285 if (did_save) { 2286 Label L_done; 2287 ba(L_done); 2288 delayed()->restore(); 2289 2290 bind(L_no_such_interface_restore); 2291 ba(L_no_such_interface); 2292 delayed()->restore(); 2293 2294 bind(L_done); 2295 } 2296 } 2297 2298 2299 // virtual method calling 2300 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2301 RegisterOrConstant vtable_index, 2302 Register method_result) { 2303 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2304 Register sethi_temp = method_result; 2305 const int base = (InstanceKlass::vtable_start_offset() * wordSize + 2306 // method pointer offset within the vtable entry: 2307 vtableEntry::method_offset_in_bytes()); 2308 RegisterOrConstant vtable_offset = vtable_index; 2309 // Each of the following three lines potentially generates an instruction. 2310 // But the total number of address formation instructions will always be 2311 // at most two, and will often be zero. In any case, it will be optimal. 2312 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2313 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2314 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset); 2315 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2316 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2317 ld_ptr(vtable_entry_addr, method_result); 2318 } 2319 2320 2321 void MacroAssembler::check_klass_subtype(Register sub_klass, 2322 Register super_klass, 2323 Register temp_reg, 2324 Register temp2_reg, 2325 Label& L_success) { 2326 Register sub_2 = sub_klass; 2327 Register sup_2 = super_klass; 2328 if (!sub_2->is_global()) sub_2 = L0; 2329 if (!sup_2->is_global()) sup_2 = L1; 2330 bool did_save = false; 2331 if (temp_reg == noreg || temp2_reg == noreg) { 2332 temp_reg = L2; 2333 temp2_reg = L3; 2334 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2335 sub_klass = sub_2; 2336 super_klass = sup_2; 2337 did_save = true; 2338 } 2339 Label L_failure, L_pop_to_failure, L_pop_to_success; 2340 check_klass_subtype_fast_path(sub_klass, super_klass, 2341 temp_reg, temp2_reg, 2342 (did_save ? &L_pop_to_success : &L_success), 2343 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2344 2345 if (!did_save) 2346 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2347 check_klass_subtype_slow_path(sub_2, sup_2, 2348 L2, L3, L4, L5, 2349 NULL, &L_pop_to_failure); 2350 2351 // on success: 2352 bind(L_pop_to_success); 2353 restore(); 2354 ba_short(L_success); 2355 2356 // on failure: 2357 bind(L_pop_to_failure); 2358 restore(); 2359 bind(L_failure); 2360 } 2361 2362 2363 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2364 Register super_klass, 2365 Register temp_reg, 2366 Register temp2_reg, 2367 Label* L_success, 2368 Label* L_failure, 2369 Label* L_slow_path, 2370 RegisterOrConstant super_check_offset) { 2371 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2372 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2373 2374 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2375 bool need_slow_path = (must_load_sco || 2376 super_check_offset.constant_or_zero() == sco_offset); 2377 2378 assert_different_registers(sub_klass, super_klass, temp_reg); 2379 if (super_check_offset.is_register()) { 2380 assert_different_registers(sub_klass, super_klass, temp_reg, 2381 super_check_offset.as_register()); 2382 } else if (must_load_sco) { 2383 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2384 } 2385 2386 Label L_fallthrough; 2387 int label_nulls = 0; 2388 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2389 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2390 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2391 assert(label_nulls <= 1 || 2392 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2393 "at most one NULL in the batch, usually"); 2394 2395 // If the pointers are equal, we are done (e.g., String[] elements). 2396 // This self-check enables sharing of secondary supertype arrays among 2397 // non-primary types such as array-of-interface. Otherwise, each such 2398 // type would need its own customized SSA. 2399 // We move this check to the front of the fast path because many 2400 // type checks are in fact trivially successful in this manner, 2401 // so we get a nicely predicted branch right at the start of the check. 2402 cmp(super_klass, sub_klass); 2403 brx(Assembler::equal, false, Assembler::pn, *L_success); 2404 delayed()->nop(); 2405 2406 // Check the supertype display: 2407 if (must_load_sco) { 2408 // The super check offset is always positive... 2409 lduw(super_klass, sco_offset, temp2_reg); 2410 super_check_offset = RegisterOrConstant(temp2_reg); 2411 // super_check_offset is register. 2412 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2413 } 2414 ld_ptr(sub_klass, super_check_offset, temp_reg); 2415 cmp(super_klass, temp_reg); 2416 2417 // This check has worked decisively for primary supers. 2418 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2419 // (Secondary supers are interfaces and very deeply nested subtypes.) 2420 // This works in the same check above because of a tricky aliasing 2421 // between the super_cache and the primary super display elements. 2422 // (The 'super_check_addr' can address either, as the case requires.) 2423 // Note that the cache is updated below if it does not help us find 2424 // what we need immediately. 2425 // So if it was a primary super, we can just fail immediately. 
2426 // Otherwise, it's the slow path for us (no success at this point). 2427 2428 // Hacked ba(), which may only be used just before L_fallthrough. 2429 #define FINAL_JUMP(label) \ 2430 if (&(label) != &L_fallthrough) { \ 2431 ba(label); delayed()->nop(); \ 2432 } 2433 2434 if (super_check_offset.is_register()) { 2435 brx(Assembler::equal, false, Assembler::pn, *L_success); 2436 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2437 2438 if (L_failure == &L_fallthrough) { 2439 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2440 delayed()->nop(); 2441 } else { 2442 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2443 delayed()->nop(); 2444 FINAL_JUMP(*L_slow_path); 2445 } 2446 } else if (super_check_offset.as_constant() == sc_offset) { 2447 // Need a slow path; fast failure is impossible. 2448 if (L_slow_path == &L_fallthrough) { 2449 brx(Assembler::equal, false, Assembler::pt, *L_success); 2450 delayed()->nop(); 2451 } else { 2452 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2453 delayed()->nop(); 2454 FINAL_JUMP(*L_success); 2455 } 2456 } else { 2457 // No slow path; it's a fast decision. 2458 if (L_failure == &L_fallthrough) { 2459 brx(Assembler::equal, false, Assembler::pt, *L_success); 2460 delayed()->nop(); 2461 } else { 2462 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2463 delayed()->nop(); 2464 FINAL_JUMP(*L_success); 2465 } 2466 } 2467 2468 bind(L_fallthrough); 2469 2470 #undef FINAL_JUMP 2471 } 2472 2473 2474 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2475 Register super_klass, 2476 Register count_temp, 2477 Register scan_temp, 2478 Register scratch_reg, 2479 Register coop_reg, 2480 Label* L_success, 2481 Label* L_failure) { 2482 assert_different_registers(sub_klass, super_klass, 2483 count_temp, scan_temp, scratch_reg, coop_reg); 2484 2485 Label L_fallthrough, L_loop; 2486 int label_nulls = 0; 2487 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2488 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2489 assert(label_nulls <= 1, "at most one NULL in the batch"); 2490 2491 // a couple of useful fields in sub_klass: 2492 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2493 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2494 2495 // Do a linear scan of the secondary super-klass chain. 2496 // This code is rarely used, so simplicity is a virtue here. 2497 2498 #ifndef PRODUCT 2499 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2500 inc_counter((address) pst_counter, count_temp, scan_temp); 2501 #endif 2502 2503 // We will consult the secondary-super array. 2504 ld_ptr(sub_klass, ss_offset, scan_temp); 2505 2506 Register search_key = super_klass; 2507 2508 // Load the array length. (Positive movl does right thing on LP64.) 2509 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2510 2511 // Check for empty secondary super list 2512 tst(count_temp); 2513 2514 // In the array of super classes elements are pointer sized. 2515 int element_size = wordSize; 2516 2517 // Top of search loop 2518 bind(L_loop); 2519 br(Assembler::equal, false, Assembler::pn, *L_failure); 2520 delayed()->add(scan_temp, element_size, scan_temp); 2521 2522 // Skip the array header in all array accesses. 
2523 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2524 elem_offset -= element_size; // the scan pointer was pre-incremented also 2525 2526 // Load next super to check 2527 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2528 2529 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2530 cmp(scratch_reg, search_key); 2531 2532 // A miss means we are NOT a subtype and need to keep looping 2533 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2534 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2535 2536 // Success. Cache the super we found and proceed in triumph. 2537 st_ptr(super_klass, sub_klass, sc_offset); 2538 2539 if (L_success != &L_fallthrough) { 2540 ba(*L_success); 2541 delayed()->nop(); 2542 } 2543 2544 bind(L_fallthrough); 2545 } 2546 2547 2548 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2549 Register temp_reg, 2550 int extra_slot_offset) { 2551 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2552 int stackElementSize = Interpreter::stackElementSize; 2553 int offset = extra_slot_offset * stackElementSize; 2554 if (arg_slot.is_constant()) { 2555 offset += arg_slot.as_constant() * stackElementSize; 2556 return offset; 2557 } else { 2558 assert(temp_reg != noreg, "must specify"); 2559 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2560 if (offset != 0) 2561 add(temp_reg, offset, temp_reg); 2562 return temp_reg; 2563 } 2564 } 2565 2566 2567 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2568 Register temp_reg, 2569 int extra_slot_offset) { 2570 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2571 } 2572 2573 2574 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2575 Register temp_reg, 2576 Label& done, Label* slow_case, 2577 BiasedLockingCounters* counters) { 2578 assert(UseBiasedLocking, "why call this otherwise?"); 2579 2580 if (PrintBiasedLockingStatistics) { 2581 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2582 if (counters == NULL) 2583 counters = BiasedLocking::counters(); 2584 } 2585 2586 Label cas_label; 2587 2588 // Biased locking 2589 // See whether the lock is currently biased toward our thread and 2590 // whether the epoch is still valid 2591 // Note that the runtime guarantees sufficient alignment of JavaThread 2592 // pointers to allow age to be placed into low bits 2593 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2594 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2595 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2596 2597 load_klass(obj_reg, temp_reg); 2598 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2599 or3(G2_thread, temp_reg, temp_reg); 2600 xor3(mark_reg, temp_reg, temp_reg); 2601 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2602 if (counters != NULL) { 2603 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2604 // Reload mark_reg as we may need it later 2605 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2606 } 2607 brx(Assembler::equal, true, Assembler::pt, done); 2608 delayed()->nop(); 2609 2610 Label try_revoke_bias; 2611 Label try_rebias; 2612 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2613 
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2614 2615 // At this point we know that the header has the bias pattern and 2616 // that we are not the bias owner in the current epoch. We need to 2617 // figure out more details about the state of the header in order to 2618 // know what operations can be legally performed on the object's 2619 // header. 2620 2621 // If the low three bits in the xor result aren't clear, that means 2622 // the prototype header is no longer biased and we have to revoke 2623 // the bias on this object. 2624 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 2625 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2626 2627 // Biasing is still enabled for this data type. See whether the 2628 // epoch of the current bias is still valid, meaning that the epoch 2629 // bits of the mark word are equal to the epoch bits of the 2630 // prototype header. (Note that the prototype header's epoch bits 2631 // only change at a safepoint.) If not, attempt to rebias the object 2632 // toward the current thread. Note that we must be absolutely sure 2633 // that the current epoch is invalid in order to do this because 2634 // otherwise the manipulations it performs on the mark word are 2635 // illegal. 2636 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 2637 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2638 2639 // The epoch of the current bias is still valid but we know nothing 2640 // about the owner; it might be set or it might be clear. Try to 2641 // acquire the bias of the object using an atomic operation. If this 2642 // fails we will go in to the runtime to revoke the object's bias. 2643 // Note that we first construct the presumed unbiased header so we 2644 // don't accidentally blow away another thread's valid bias. 2645 delayed()->and3(mark_reg, 2646 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 2647 mark_reg); 2648 or3(G2_thread, mark_reg, temp_reg); 2649 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2650 // If the biasing toward our thread failed, this means that 2651 // another thread succeeded in biasing it toward itself and we 2652 // need to revoke that bias. The revocation will occur in the 2653 // interpreter runtime in the slow case. 2654 cmp(mark_reg, temp_reg); 2655 if (counters != NULL) { 2656 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2657 } 2658 if (slow_case != NULL) { 2659 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2660 delayed()->nop(); 2661 } 2662 ba_short(done); 2663 2664 bind(try_rebias); 2665 // At this point we know the epoch has expired, meaning that the 2666 // current "bias owner", if any, is actually invalid. Under these 2667 // circumstances _only_, we are allowed to use the current header's 2668 // value as the comparison value when doing the cas to acquire the 2669 // bias in the current epoch. In other words, we allow transfer of 2670 // the bias from one thread to another directly in this situation. 2671 // 2672 // FIXME: due to a lack of registers we currently blow away the age 2673 // bits in this situation. Should attempt to preserve them. 
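// In outline (illustrative only), the rebias attempt below is:
//   temp = obj->klass()->prototype_header() | thread;  // freshly biased header
//   cas(&obj->mark, expected = mark, new = temp);
//   if (old mark != expected) goto slow_case;          // lost the race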
2674 load_klass(obj_reg, temp_reg); 2675 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2676 or3(G2_thread, temp_reg, temp_reg); 2677 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2678 // If the biasing toward our thread failed, this means that 2679 // another thread succeeded in biasing it toward itself and we 2680 // need to revoke that bias. The revocation will occur in the 2681 // interpreter runtime in the slow case. 2682 cmp(mark_reg, temp_reg); 2683 if (counters != NULL) { 2684 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2685 } 2686 if (slow_case != NULL) { 2687 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2688 delayed()->nop(); 2689 } 2690 ba_short(done); 2691 2692 bind(try_revoke_bias); 2693 // The prototype mark in the klass doesn't have the bias bit set any 2694 // more, indicating that objects of this data type are not supposed 2695 // to be biased any more. We are going to try to reset the mark of 2696 // this object to the prototype value and fall through to the 2697 // CAS-based locking scheme. Note that if our CAS fails, it means 2698 // that another thread raced us for the privilege of revoking the 2699 // bias of this particular object, so it's okay to continue in the 2700 // normal locking code. 2701 // 2702 // FIXME: due to a lack of registers we currently blow away the age 2703 // bits in this situation. Should attempt to preserve them. 2704 load_klass(obj_reg, temp_reg); 2705 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2706 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2707 // Fall through to the normal CAS-based lock, because no matter what 2708 // the result of the above CAS, some thread must have succeeded in 2709 // removing the bias bit from the object's header. 2710 if (counters != NULL) { 2711 cmp(mark_reg, temp_reg); 2712 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2713 } 2714 2715 bind(cas_label); 2716 } 2717 2718 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2719 bool allow_delay_slot_filling) { 2720 // Check for biased locking unlock case, which is a no-op 2721 // Note: we do not have to check the thread ID for two reasons. 2722 // First, the interpreter checks for IllegalMonitorStateException at 2723 // a higher level. Second, if the bias was revoked while we held the 2724 // lock, the object could not be rebiased toward another thread, so 2725 // the bias bit would be clear. 2726 ld_ptr(mark_addr, temp_reg); 2727 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2728 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2729 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2730 delayed(); 2731 if (!allow_delay_slot_filling) { 2732 nop(); 2733 } 2734 } 2735 2736 2737 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2738 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2739 // The code could be tightened up considerably. 2740 // 2741 // box->dhw disposition - post-conditions at DONE_LABEL. 2742 // - Successful inflated lock: box->dhw != 0. 2743 // Any non-zero value suffices. 2744 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2745 // - Successful Stack-lock: box->dhw == mark. 2746 // box->dhw must contain the displaced mark word value 2747 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2748 // The slow-path fast_enter() and slow_enter() operators 2749 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()). 2750 // - Biased: box->dhw is undefined 2751 // 2752 // SPARC refworkload performance - specifically jetstream and scimark - is 2753 // extremely sensitive to the size of the code emitted by compiler_lock_object 2754 // and compiler_unlock_object. Critically, the key factor is code size, not path 2755 // length. (Simple experiments padding CLO with unexecuted NOPs demonstrate the 2756 // effect). 2757 2758 2759 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 2760 Register Rbox, Register Rscratch, 2761 BiasedLockingCounters* counters, 2762 bool try_bias) { 2763 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2764 2765 verify_oop(Roop); 2766 Label done ; 2767 2768 if (counters != NULL) { 2769 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 2770 } 2771 2772 if (EmitSync & 1) { 2773 mov(3, Rscratch); 2774 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2775 cmp(SP, G0); 2776 return ; 2777 } 2778 2779 if (EmitSync & 2) { 2780 2781 // Fetch object's markword 2782 ld_ptr(mark_addr, Rmark); 2783 2784 if (try_bias) { 2785 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2786 } 2787 2788 // Save Rbox in Rscratch to be used for the cas operation 2789 mov(Rbox, Rscratch); 2790 2791 // set Rmark to markOop | markOopDesc::unlocked_value 2792 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2793 2794 // Initialize the box. (Must happen before we update the object mark!) 2795 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2796 2797 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 2798 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2799 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2800 2801 // if compare/exchange succeeded we found an unlocked object and we now have locked it 2802 // hence we are done 2803 cmp(Rmark, Rscratch); 2804 #ifdef _LP64 2805 sub(Rscratch, STACK_BIAS, Rscratch); 2806 #endif 2807 brx(Assembler::equal, false, Assembler::pt, done); 2808 delayed()->sub(Rscratch, SP, Rscratch); // pull next instruction into delay slot 2809 2810 // we did not find an unlocked object so see if this is a recursive case 2811 // sub(Rscratch, SP, Rscratch); 2812 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2813 andcc(Rscratch, 0xfffff003, Rscratch); 2814 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2815 bind (done); 2816 return ; 2817 } 2818 2819 Label Egress ; 2820 2821 if (EmitSync & 256) { 2822 Label IsInflated ; 2823 2824 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2825 // Triage: biased, stack-locked, neutral, inflated 2826 if (try_bias) { 2827 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2828 // Invariant: if control reaches this point in the emitted stream 2829 // then Rmark has not been modified. 2830 } 2831 2832 // Store mark into displaced mark field in the on-stack basic-lock "box" 2833 // Critically, this must happen before the CAS 2834 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. 2835 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2836 andcc(Rmark, 2, G0); 2837 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2838 delayed()-> 2839 2840 // Try stack-lock acquisition.
2841 // Beware: the 1st instruction is in a delay slot 2842 mov(Rbox, Rscratch); 2843 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2844 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2845 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2846 cmp(Rmark, Rscratch); 2847 brx(Assembler::equal, false, Assembler::pt, done); 2848 delayed()->sub(Rscratch, SP, Rscratch); 2849 2850 // Stack-lock attempt failed - check for recursive stack-lock. 2851 // See the comments below about how we might remove this case. 2852 #ifdef _LP64 2853 sub(Rscratch, STACK_BIAS, Rscratch); 2854 #endif 2855 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2856 andcc(Rscratch, 0xfffff003, Rscratch); 2857 br(Assembler::always, false, Assembler::pt, done); 2858 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2859 2860 bind(IsInflated); 2861 if (EmitSync & 64) { 2862 // If m->owner != null goto IsLocked 2863 // Pessimistic form: Test-and-CAS vs CAS 2864 // The optimistic form avoids RTS->RTO cache line upgrades. 2865 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2866 andcc(Rscratch, Rscratch, G0); 2867 brx(Assembler::notZero, false, Assembler::pn, done); 2868 delayed()->nop(); 2869 // m->owner == null : it's unlocked. 2870 } 2871 2872 // Try to CAS m->owner from null to Self 2873 // Invariant: if we acquire the lock then _recursions should be 0. 2874 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2875 mov(G2_thread, Rscratch); 2876 cas_ptr(Rmark, G0, Rscratch); 2877 cmp(Rscratch, G0); 2878 // Intentional fall-through into done 2879 } else { 2880 // Aggressively avoid the Store-before-CAS penalty 2881 // Defer the store into box->dhw until after the CAS 2882 Label IsInflated, Recursive ; 2883 2884 // Anticipate CAS -- Avoid RTS->RTO upgrade 2885 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2886 2887 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2888 // Triage: biased, stack-locked, neutral, inflated 2889 2890 if (try_bias) { 2891 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2892 // Invariant: if control reaches this point in the emitted stream 2893 // then Rmark has not been modified. 2894 } 2895 andcc(Rmark, 2, G0); 2896 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2897 delayed()-> // Beware - dangling delay-slot 2898 2899 // Try stack-lock acquisition. 2900 // Transiently install BUSY (0) encoding in the mark word. 2901 // if the CAS of 0 into the mark was successful then we execute: 2902 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2903 // ST obj->mark = box -- overwrite transient 0 value 2904 // This presumes TSO, of course. 2905 2906 mov(0, Rscratch); 2907 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2908 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2909 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2910 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2911 cmp(Rscratch, Rmark); 2912 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2913 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2914 if (counters != NULL) { 2915 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2916 } 2917 ba(done); 2918 delayed()->st_ptr(Rbox, mark_addr); 2919 2920 bind(Recursive); 2921 // Stack-lock attempt failed - check for recursive stack-lock. 
2922 // Tests show that we can remove the recursive case with no impact 2923 // on refworkload 0.83. If we need to reduce the size of the code 2924 // emitted by compiler_lock_object() the recursive case is a perfect 2925 // candidate. 2926 // 2927 // A more extreme idea is to always inflate on stack-lock recursion. 2928 // This lets us eliminate the recursive checks in compiler_lock_object 2929 // and compiler_unlock_object and the (box->dhw == 0) encoding. 2930 // A brief experiment - requiring changes to synchronizer.cpp and the 2931 // interpreter - showed a performance *increase*. In the same experiment I eliminated 2932 // the fast-path stack-lock code from the interpreter and always passed 2933 // control to the "slow" operators in synchronizer.cpp. 2934 2935 // RScratch contains the fetched obj->mark value from the failed CAS. 2936 #ifdef _LP64 2937 sub(Rscratch, STACK_BIAS, Rscratch); 2938 #endif 2939 sub(Rscratch, SP, Rscratch); 2940 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2941 andcc(Rscratch, 0xfffff003, Rscratch); 2942 if (counters != NULL) { 2943 // Accounting needs the Rscratch register 2944 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2945 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2946 ba_short(done); 2947 } else { 2948 ba(done); 2949 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2950 } 2951 2952 bind (IsInflated); 2953 2954 // Try to CAS m->owner from null to Self 2955 // Invariant: if we acquire the lock then _recursions should be 0. 2956 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2957 mov(G2_thread, Rscratch); 2958 cas_ptr(Rmark, G0, Rscratch); 2959 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success 2960 // set icc.zf : 1=success 0=failure 2961 // ST box->displaced_header = NonZero. 2962 // Any non-zero value suffices: 2963 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 2964 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2965 // Intentional fall-through into done 2966 } 2967 2968 bind (done); 2969 } 2970 2971 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, 2972 Register Rbox, Register Rscratch, 2973 bool try_bias) { 2974 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2975 2976 Label done ; 2977 2978 if (EmitSync & 4) { 2979 cmp(SP, G0); 2980 return ; 2981 } 2982 2983 if (EmitSync & 8) { 2984 if (try_bias) { 2985 biased_locking_exit(mark_addr, Rscratch, done); 2986 } 2987 2988 // Test first if it is a fast recursive unlock 2989 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark); 2990 br_null_short(Rmark, Assembler::pt, done); 2991 2992 // Check if it is still a lightweight lock; this is true if we see 2993 // the stack address of the basicLock in the markOop of the object 2994 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2995 cas_ptr(mark_addr.base(), Rbox, Rmark); 2996 ba(done); 2997 delayed()->cmp(Rbox, Rmark); 2998 bind(done); 2999 return ; 3000 } 3001 3002 // Beware ... If the aggregate size of the code emitted by CLO and CUO 3003 // is too large, performance rolls abruptly off a cliff. 3004 // This could be related to inlining policies, code cache management, or 3005 // I$ effects.
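// In outline (illustrative only), the fast unlock path below triages as:
//   if (box->dhw == 0) goto done;      // recursive stack-lock: nothing to undo
//   if (mark & 2)      goto inflated;  // monitor present: 1-0 release protocol
//   LStacked:
//   cas(&obj->mark, box, box->dhw);    // stack-locked: reinstall displaced mark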
3006 Label LStacked ; 3007 3008 if (try_bias) { 3009 // TODO: eliminate redundant LDs of obj->mark 3010 biased_locking_exit(mark_addr, Rscratch, done); 3011 } 3012 3013 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 3014 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 3015 andcc(Rscratch, Rscratch, G0); 3016 brx(Assembler::zero, false, Assembler::pn, done); 3017 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 3018 andcc(Rmark, 2, G0); 3019 brx(Assembler::zero, false, Assembler::pt, LStacked); 3020 delayed()->nop(); 3021 3022 // It's inflated 3023 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 3024 // the ST of 0 into _owner which releases the lock. This prevents loads 3025 // and stores within the critical section from reordering (floating) 3026 // past the store that releases the lock. But TSO is a strong memory model 3027 // and that particular flavor of barrier is a noop, so we can safely elide it. 3028 // Note that we use 1-0 locking by default for the inflated case. We 3029 // close the resultant (and rare) race by having contended threads in 3030 // monitorenter periodically poll _owner. 3031 3032 if (EmitSync & 1024) { 3033 // Emit code to check that _owner == Self 3034 // We could fold the _owner test into subsequent code more efficiently 3035 // than using a stand-alone check, but since _owner checking is off by 3036 // default we don't bother. We also might consider predicating the 3037 // _owner==Self check on Xcheck:jni or running on a debug build. 3038 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 3039 orcc(Rscratch, G0, G0); 3040 brx(Assembler::notZero, false, Assembler::pn, done); 3041 delayed()->nop(); 3042 } 3043 3044 if (EmitSync & 512) { 3045 // classic lock release code absent 1-0 locking 3046 // m->Owner = null; 3047 // membar #storeload 3048 // if (m->cxq|m->EntryList) == null goto Success 3049 // if (m->succ != null) goto Success 3050 // if CAS (&m->Owner,0,Self) != 0 goto Success 3051 // goto SlowPath 3052 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 3053 orcc(Rbox, G0, G0); 3054 brx(Assembler::notZero, false, Assembler::pn, done); 3055 delayed()->nop(); 3056 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3057 if (os::is_MP()) { membar(StoreLoad); } 3058 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 3059 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 3060 orcc(Rbox, Rscratch, G0); 3061 brx(Assembler::zero, false, Assembler::pt, done); 3062 delayed()-> 3063 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 3064 andcc(Rscratch, Rscratch, G0); 3065 brx(Assembler::notZero, false, Assembler::pt, done); 3066 delayed()->andcc(G0, G0, G0); 3067 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 3068 mov(G2_thread, Rscratch); 3069 cas_ptr(Rmark, G0, Rscratch); 3070 cmp(Rscratch, G0); 3071 // invert icc.zf and goto done 3072 brx(Assembler::notZero, false, Assembler::pt, done); 3073 delayed()->cmp(G0, G0); 3074 br(Assembler::always, false, Assembler::pt, done); 3075 delayed()->cmp(G0, 1); 3076 } else { 3077 // 1-0 form : avoids CAS and MEMBAR in the common case 3078 // Do not bother to ratify that m->Owner == Self. 
3079 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 3080 orcc(Rbox, G0, G0); 3081 brx(Assembler::notZero, false, Assembler::pn, done); 3082 delayed()-> 3083 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 3084 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 3085 orcc(Rbox, Rscratch, G0); 3086 if (EmitSync & 16384) { 3087 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then 3088 // we should transfer control directly to the slow-path. 3089 // This test makes the reacquire operation below very infrequent. 3090 // The logic is equivalent to: 3091 // if (cxq|EntryList) == null : Owner=null; goto Success 3092 // if succ == null : goto SlowPath 3093 // Owner=null; membar #storeload 3094 // if succ != null : goto Success 3095 // if CAS(&Owner,null,Self) != null goto Success 3096 // goto SlowPath 3097 brx(Assembler::zero, true, Assembler::pt, done); 3098 delayed()-> 3099 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3100 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 3101 andcc(Rscratch, Rscratch, G0); 3102 brx(Assembler::zero, false, Assembler::pt, done); 3103 delayed()->orcc(G0, 1, G0); 3104 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3105 } else { 3106 brx(Assembler::zero, false, Assembler::pt, done); 3107 delayed()-> 3108 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3109 } 3110 if (os::is_MP()) { membar(StoreLoad); } 3111 // Check that _succ is (or remains) non-zero 3112 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 3113 andcc(Rscratch, Rscratch, G0); 3114 brx(Assembler::notZero, false, Assembler::pt, done); 3115 delayed()->andcc(G0, G0, G0); 3116 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 3117 mov(G2_thread, Rscratch); 3118 cas_ptr(Rmark, G0, Rscratch); 3119 cmp(Rscratch, G0); 3120 // invert icc.zf and goto done 3121 // A slightly better v8+/v9 idiom would be the following: 3122 // movrnz Rscratch,1,Rscratch 3123 // ba done 3124 // xorcc Rscratch,1,G0 3125 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register 3126 brx(Assembler::notZero, false, Assembler::pt, done); 3127 delayed()->cmp(G0, G0); 3128 br(Assembler::always, false, Assembler::pt, done); 3129 delayed()->cmp(G0, 1); 3130 } 3131 3132 bind (LStacked); 3133 // Consider: we could replace the expensive CAS in the exit 3134 // path with a simple ST of the displaced mark value fetched from 3135 // the on-stack basiclock box. That admits a race where a thread T2 3136 // in the slow lock path -- inflating with monitor M -- could race a 3137 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 3138 // More precisely T1 in the stack-lock unlock path could "stomp" the 3139 // inflated mark value M installed by T2, resulting in an orphan 3140 // object monitor M and T2 becoming stranded. We can remedy that situation 3141 // by having T2 periodically poll the object's mark word using timed wait 3142 // operations. If T2 discovers that a stomp has occurred it vacates 3143 // the monitor M and wakes any other threads stranded on the now-orphan M. 3144 // In addition the monitor scavenger, which performs deflation, 3145 // would also need to check for orphan monitors and stranded threads. 3146 // 3147 // Finally, inflation is also used when T2 needs to assign a hashCode 3148 // to O and O is stack-locked by T1.
The "stomp" race could cause 3149 // an assigned hashCode value to be lost. We can avoid that condition 3150 // and provide the necessary hashCode stability invariants by ensuring 3151 // that hashCode generation is idempotent between copying GCs. 3152 // For example we could compute the hashCode of an object O as 3153 // O's heap address XOR some high quality RNG value that is refreshed 3154 // at GC-time. The monitor scavenger would install the hashCode 3155 // found in any orphan monitors. Again, the mechanism admits a 3156 // lost-update "stomp" WAW race but detects and recovers as needed. 3157 // 3158 // A prototype implementation showed excellent results, although 3159 // the scavenger and timeout code was rather involved. 3160 3161 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3162 cmp(Rbox, Rscratch); 3163 // Intentional fall through into done ... 3164 3165 bind(done); 3166 } 3167 3168 3169 3170 void MacroAssembler::print_CPU_state() { 3171 // %%%%% need to implement this 3172 } 3173 3174 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3175 // %%%%% need to implement this 3176 } 3177 3178 void MacroAssembler::push_IU_state() { 3179 // %%%%% need to implement this 3180 } 3181 3182 3183 void MacroAssembler::pop_IU_state() { 3184 // %%%%% need to implement this 3185 } 3186 3187 3188 void MacroAssembler::push_FPU_state() { 3189 // %%%%% need to implement this 3190 } 3191 3192 3193 void MacroAssembler::pop_FPU_state() { 3194 // %%%%% need to implement this 3195 } 3196 3197 3198 void MacroAssembler::push_CPU_state() { 3199 // %%%%% need to implement this 3200 } 3201 3202 3203 void MacroAssembler::pop_CPU_state() { 3204 // %%%%% need to implement this 3205 } 3206 3207 3208 3209 void MacroAssembler::verify_tlab() { 3210 #ifdef ASSERT 3211 if (UseTLAB && VerifyOops) { 3212 Label next, next2, ok; 3213 Register t1 = L0; 3214 Register t2 = L1; 3215 Register t3 = L2; 3216 3217 save_frame(0); 3218 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3219 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3220 or3(t1, t2, t3); 3221 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3222 STOP("assert(top >= start)"); 3223 should_not_reach_here(); 3224 3225 bind(next); 3226 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3227 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3228 or3(t3, t2, t3); 3229 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3230 STOP("assert(top <= end)"); 3231 should_not_reach_here(); 3232 3233 bind(next2); 3234 and3(t3, MinObjAlignmentInBytesMask, t3); 3235 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3236 STOP("assert(aligned)"); 3237 should_not_reach_here(); 3238 3239 bind(ok); 3240 restore(); 3241 } 3242 #endif 3243 } 3244 3245 3246 void MacroAssembler::eden_allocate( 3247 Register obj, // result: pointer to object after successful allocation 3248 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3249 int con_size_in_bytes, // object size in bytes if known at compile time 3250 Register t1, // temp register 3251 Register t2, // temp register 3252 Label& slow_case // continuation point if fast allocation fails 3253 ){ 3254 // make sure arguments make sense 3255 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3256 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3257 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3258 3259 if (!Universe::heap()->supports_inline_contig_alloc()) { 3260 // No allocation in the shared eden. 3261 ba(slow_case); 3262 delayed()->nop(); 3263 } else { 3264 // get eden boundaries 3265 // note: we need both top & top_addr! 3266 const Register top_addr = t1; 3267 const Register end = t2; 3268 3269 CollectedHeap* ch = Universe::heap(); 3270 set((intx)ch->top_addr(), top_addr); 3271 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3272 ld_ptr(top_addr, delta, end); 3273 ld_ptr(top_addr, 0, obj); 3274 3275 // try to allocate 3276 Label retry; 3277 bind(retry); 3278 #ifdef ASSERT 3279 // make sure eden top is properly aligned 3280 { 3281 Label L; 3282 btst(MinObjAlignmentInBytesMask, obj); 3283 br(Assembler::zero, false, Assembler::pt, L); 3284 delayed()->nop(); 3285 STOP("eden top is not properly aligned"); 3286 bind(L); 3287 } 3288 #endif // ASSERT 3289 const Register free = end; 3290 sub(end, obj, free); // compute amount of free space 3291 if (var_size_in_bytes->is_valid()) { 3292 // size is unknown at compile time 3293 cmp(free, var_size_in_bytes); 3294 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3295 delayed()->add(obj, var_size_in_bytes, end); 3296 } else { 3297 // size is known at compile time 3298 cmp(free, con_size_in_bytes); 3299 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3300 delayed()->add(obj, con_size_in_bytes, end); 3301 } 3302 // Compare obj with the value at top_addr; if still equal, swap the value of 3303 // end with the value at top_addr. If not equal, read the value at top_addr 3304 // into end. 3305 cas_ptr(top_addr, obj, end); 3306 // if someone beat us on the allocation, try again, otherwise continue 3307 cmp(obj, end); 3308 brx(Assembler::notEqual, false, Assembler::pn, retry); 3309 delayed()->mov(end, obj); // nop if successfull since obj == end 3310 3311 #ifdef ASSERT 3312 // make sure eden top is properly aligned 3313 { 3314 Label L; 3315 const Register top_addr = t1; 3316 3317 set((intx)ch->top_addr(), top_addr); 3318 ld_ptr(top_addr, 0, top_addr); 3319 btst(MinObjAlignmentInBytesMask, top_addr); 3320 br(Assembler::zero, false, Assembler::pt, L); 3321 delayed()->nop(); 3322 STOP("eden top is not properly aligned"); 3323 bind(L); 3324 } 3325 #endif // ASSERT 3326 } 3327 } 3328 3329 3330 void MacroAssembler::tlab_allocate( 3331 Register obj, // result: pointer to object after successful allocation 3332 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3333 int con_size_in_bytes, // object size in bytes if known at compile time 3334 Register t1, // temp register 3335 Label& slow_case // continuation point if fast allocation fails 3336 ){ 3337 // make sure arguments make sense 3338 assert_different_registers(obj, var_size_in_bytes, t1); 3339 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3340 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3341 3342 const Register free = t1; 3343 3344 verify_tlab(); 3345 3346 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3347 3348 // calculate amount of free space 3349 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3350 sub(free, obj, free); 3351 3352 Label done; 3353 if (var_size_in_bytes == noreg) { 3354 cmp(free, con_size_in_bytes); 3355 } else { 3356 
void MacroAssembler::tlab_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if known at compile time
  Register t1,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
){
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1);
  assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  const Register free = t1;

  verify_tlab();

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);

  // calculate amount of free space
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
  sub(free, obj, free);

  Label done;
  if (var_size_in_bytes == noreg) {
    cmp(free, con_size_in_bytes);
  } else {
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}
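// tlab_refill below decides, on a TLAB miss, whether to discard the current
// TLAB and grab a fresh one from eden, or to keep it and allocate this one
// object directly in eden. The decision logic, roughly (illustrative only):
//
//   if (tlab_free > refill_waste_limit) {
//     // too much space left to throw away: allocate the object in shared eden
//     refill_waste_limit += increment;   // so we don't get stuck on this path
//     goto try_eden;
//   } else {
//     // discard: fill the dead TLAB remainder with a dummy int[] so the
//     // heap stays parseable, then allocate a new TLAB from eden
//   }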
void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = O0;
  Register t1 = G1;
  Register t2 = G3;
  Register t3 = O1;
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  }

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);

  // calculate amount of free space
  sub(t1, top, t1);
  srl_ptr(t1, LogHeapWordSize, t1);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmp(t1, t2);
  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);

  // increment waste limit to prevent getting stuck on this slow path
  delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  ba(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null_short(top, Assembler::pn, do_refill);

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  // store klass last. Concurrent GCs assume the length is valid if the
  // klass field is not null.
  store_klass(t2, top);
  verify_oop(top);

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  sub(top, t1, t1); // size of tlab's allocated portion
  incr_allocated_bytes(t1, t2, t3);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
  verify_tlab();
  ba(retry);
  delayed()->nop();
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}
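// cond_inc below uses negate_condition to branch *around* the increment;
// in effect (illustrative only):
//
//   if (!cond) goto L;     // branch on the negated condition
//   (*counter_ptr)++;      // inc_counter: load, increment, store
//   L: ;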
void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}


// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages. This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Copy the stack pointer into the temp stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < StackShadowPages; i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;

static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;

static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
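// generate_satb_log_enqueue below emits the out-of-line stub that pushes a
// pre-barrier value onto the thread's SATB log. The queue index counts down
// through the buffer; in rough pseudocode (illustrative only):
//
//   index = thread->satb_queue._index;       // in bytes
//   if (index == 0) {                        // buffer is full
//     handle_zero_index_for_thread();        // runtime call: flush / swap buffer
//     goto retry;
//   }
//   index -= sizeof(void*);
//   *(void**)(thread->satb_queue._buf + index) = pre_val;
//   thread->satb_queue._index = index;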
static void generate_satb_log_enqueue(bool with_frame) {
  BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);

#define __ masm.

  address start = __ pc();
  Register pre_val;

  Label refill, restart;
  if (with_frame) {
    __ save_frame(0);
    pre_val = I0;  // Was O0 before the save.
  } else {
    pre_val = O0;
  }

  int satb_q_index_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             PtrQueue::byte_offset_of_index());

  int satb_q_buf_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             PtrQueue::byte_offset_of_buf());

  assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
         in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
         "check sizes in assembly below");

  __ bind(restart);

  // Load the index into the SATB buffer. PtrQueue::_index is a size_t
  // so ld_ptr is appropriate.
  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val (I0 or O0)
  if (!with_frame) {
    // Use return-from-leaf
    __ retl();
    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  } else {
    // Not delayed.
    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  }
  if (with_frame) {
    __ ret();
    __ delayed()->restore();
  }
  __ bind(refill);

  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &SATBMarkQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L0);
  __ mov(G3_scratch, L1);
  __ mov(G4, L2);
  // We need the value of O0 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O0, L3);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);
  __ call_VM_leaf(L5, handle_zero, G2_thread);
  __ mov(L0, G1_scratch);
  __ mov(L1, G3_scratch);
  __ mov(L2, G4);
  __ mov(L3, O0);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  if (with_frame) {
    satb_log_enqueue_with_frame = start;
    satb_log_enqueue_with_frame_end = __ pc();
  } else {
    satb_log_enqueue_frameless = start;
    satb_log_enqueue_frameless_end = __ pc();
  }

#undef __
}

static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
  if (with_frame) {
    if (satb_log_enqueue_with_frame == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
    }
  } else {
    if (satb_log_enqueue_frameless == 0) {
      generate_satb_log_enqueue(with_frame);
      assert(satb_log_enqueue_frameless != 0, "postcondition.");
    }
  }
}
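// g1_write_barrier_pre below is the inline part of the G1 SATB pre-barrier.
// It filters out the common cases before calling the enqueue stub above;
// in effect (illustrative only):
//
//   if (!thread->satb_queue._active) return;   // only while marking is active
//   if (pre_val == NULL) return;               // nothing to record
//   satb_enqueue(pre_val);                     // out-of-line stub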
void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Load the marking-active flag.
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                PtrQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  PtrQueue::byte_offset_of_active()),
         tmp);
  }

  // Is marking active? If not, skip the barrier.
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg. In the first case, do a normal call. In the
  // latter, do a save here and call the frameless version.

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    generate_satb_log_enqueue_if_necessary(true); // with frame

    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    generate_satb_log_enqueue_if_necessary(false); // frameless

    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}
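// generate_dirty_card_log_enqueue below emits the out-of-line stub for the
// G1 post-barrier: given a store address in O0, it marks the corresponding
// card dirty and logs the card address in the thread's dirty-card queue.
// In rough pseudocode (illustrative only):
//
//   card = &byte_map_base[addr >> card_shift];
//   if (*card == g1_young_card_val) return;   // young regions need no logging
//   membar #StoreLoad;                        // order the oop store vs. re-read
//   if (*card == dirty_card_val) return;      // already dirty: nothing to do
//   *card = dirty_card_val;
//   dirty_card_queue.enqueue(card);           // refill via runtime on overflow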
static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This gets to assume that O0 contains the store address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
  __ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
  __ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0);  // [cardPtr] := 0 (i.e., dirty).

  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             PtrQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             PtrQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. PtrQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so? Do an experiment...

#undef __

}

static inline void
generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
  if (dirty_card_log_enqueue == 0) {
    generate_dirty_card_log_enqueue(byte_map_base);
    assert(dirty_card_log_enqueue != 0, "postcondition.");
  }
}
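// g1_write_barrier_post below is the inline part of the G1 post-barrier.
// With G1RSBarrierRegionFilter on, it filters stores whose source and
// destination lie in the same heap region (no remembered-set update needed)
// before calling the card enqueue stub above; in effect (illustrative only):
//
//   if (new_val == NULL) return;  // constant null: checked at compile time here
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return; // same region
//   dirty_card_enqueue(store_addr);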
void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {

  Label filtered;
  MacroAssembler* post_filter_masm = this;

  if (new_val == G0) return;

  G1SATBCardTableLoggingModRefBS* bs =
    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());

  if (G1RSBarrierRegionFilter) {
    xor3(store_addr, new_val, tmp);
#ifdef _LP64
    srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
    srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif

    // XXX Should I predict this taken or not? Does it matter?
    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
  }

  // If the "store_addr" register is an "in" or "local" register, move it to
  // a scratch reg so we can pass it as an argument.
  bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  // Pick a scratch register different from "tmp".
  Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  // Make sure we use up the delay slot!
  if (use_scr) {
    post_filter_masm->mov(store_addr, scr);
  } else {
    post_filter_masm->nop();
  }
  generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
  save_frame(0);
  call(dirty_card_log_enqueue);
  if (use_scr) {
    delayed()->mov(scr, O0);
  } else {
    delayed()->mov(store_addr->after_save(), O0);
  }
  restore();

  bind(filtered);
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  // If we're writing constant NULL, we can skip the write barrier.
  if (new_val == G0) return;
  CardTableModRefBS* bs =
    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  card_table_write(bs->byte_map_base, tmp, store_addr);
}

void MacroAssembler::load_klass(Register src_oop, Register klass) {
  // The number of bytes in this code is used by
  // MachCallDynamicJavaNode::ret_addr_offset()
  // if this changes, change that.
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
  else                  load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}
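// The encode/decode helpers below implement compressed oops: a 64-bit heap
// pointer is stored as a 32-bit offset from the heap base, scaled by the
// object alignment. The mapping, with null handled specially, is
// (illustrative only):
//
//   narrow = (oop == NULL) ? 0 : (uint32_t)((oop - base) >> shift);
//   oop    = (narrow == 0) ? NULL : base + ((uint64_t)narrow << shift);
//
// When the heap base is NULL ("zero-based" mode) the null checks vanish and
// encode/decode reduce to a plain shift.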
void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed() -> mov(G0, dst);
    // The sub could be moved before the branch and its delay slot annulled,
    // but that may add some unneeded work decoding null.
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}


void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}
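// A note on the bpr/annul idiom used above (illustrative reading): for the
// conditional bpr, setting the annul bit makes the delay-slot instruction
// execute only when the branch is taken. So
//
//   bpr(rc_nz, true, pt, dst, done);
//   delayed()->add(dst, G6_heapbase, dst);
//
// reads as: if (dst != 0) { dst += heapbase; } -- the base is added only for
// non-null narrow oops, which keeps NULL decoding to NULL.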
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL)
    add(dst, G6_heapbase, dst);
}

void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    sub(r, G6_heapbase, r);
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      srlx(r, LogKlassAlignmentInBytes, r);
    }
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    srlx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      set((intptr_t)Universe::narrow_klass_base(), dst);
      sub(src, dst, dst);
      if (Universe::narrow_klass_shift() != 0) {
        srlx(dst, LogKlassAlignmentInBytes, dst);
      }
    } else {
      // shift src into dst
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      srlx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}
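// Compressed class pointers follow the same base+shift scheme as compressed
// oops, but klasses are never NULL, so no null check is needed. Because the
// klass base generally differs from the heap base, the single-register
// variants above borrow G6_heapbase for the klass base and then call
// reinit_heapbase() to restore it; in effect (illustrative only):
//
//   G6_heapbase = narrow_klass_base;         // set(...)
//   r = (r - G6_heapbase) >> klass_shift;    // encode
//   G6_heapbase = narrow_ptrs_base;          // reinit_heapbase()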
// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1;  // shift src,dst or add
  if (Universe::narrow_klass_base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    if (Universe::narrow_klass_shift() != 0) {
      num_instrs += 1;  // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    if (Universe::narrow_klass_shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    sllx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      if (Universe::narrow_klass_shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)Universe::narrow_klass_base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      sllx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}
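// char_arrays_equals below compares two char[] bodies that are known to be
// 4-byte aligned and of equal, non-zero length. It first compares the odd
// trailing character (if the char count is odd), then walks both arrays a
// word (two chars) at a time. Roughly (illustrative only):
//
//   if (nbytes & 2) {                    // odd number of chars
//     nbytes -= 2;
//     if (a1[nbytes/2] != a2[nbytes/2]) return 0;
//     if (nbytes == 0) return 1;         // that was the only char
//   }
//   for (i = 0; i < nbytes; i += 4)      // word-by-word
//     if (*(jint*)(a1 + i) != *(jint*)(a2 + i)) return 0;
//   // caller emits the "equal" result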
// Compare char[] arrays aligned to 4 bytes.
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
                                        Register limit, Register result,
                                        Register chr1, Register chr2, Label& Ldone) {
  Label Lvector, Lloop;
  assert(chr1 == result, "should be the same");

  // Note: limit contains number of bytes (2*char_elements) != 0.
  andcc(limit, 0x2, chr1); // trailing character ?
  br(Assembler::zero, false, Assembler::pt, Lvector);
  delayed()->nop();

  // compare the trailing char
  sub(limit, sizeof(jchar), limit);
  lduh(ary1, limit, chr1);
  lduh(ary2, limit, chr2);
  cmp(chr1, chr2);
  br(Assembler::notEqual, true, Assembler::pt, Ldone);
  delayed()->mov(G0, result);    // not equal

  // only one char ?
  cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
  delayed()->add(G0, 1, result); // zero-length arrays are equal

  // word by word compare, don't need alignment check
  bind(Lvector);
  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  lduw(ary1, limit, chr1);
  bind(Lloop);
  lduw(ary2, limit, chr2);
  cmp(chr1, chr2);
  br(Assembler::notEqual, true, Assembler::pt, Ldone);
  delayed()->mov(G0, result);    // not equal
  inccc(limit, 2*sizeof(jchar));
  // annul LDUW if branch is not taken to prevent access past end of array
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduw(ary1, limit, chr1); // hoisted

  // Caller should set it:
  // add(G0, 1, result); // equals
}

// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note: count is 64-bit in a 64-bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}
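// A note on bis_zeroing above: a block-initializing store (stxa with
// ASI_ST_BLKINIT_PRIMARY) initializes a cache line to zero without first
// fetching its old contents from memory, which is why it only pays off for
// large blocks and must be followed by a membar. The loop structure is,
// roughly (illustrative only):
//
//   zero up to the next cache-line boundary with ordinary 8-byte stores;
//   BIS-zero whole cache lines, stopping ~2 lines short of the end;
//   membar #StoreLoad;   // required after BIS
//   zero the tail with ordinary 8-byte stores.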
/**
 * Update CRC-32[C] with a byte value according to constants in table
 *
 * @param [in,out]crc  Register containing the crc.
 * @param [in]val      Register containing the byte to fold into the CRC.
 * @param [in]table    Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xor3(val, crc, val);
  and3(val, 0xFF, val);
  sllx(val, 2, val);
  lduw(table, val, val);
  srlx(crc, 8, crc);
  xor3(val, crc, crc);
}

// Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) {
  srlx(src, 24, dst);

  sllx(src, 32+8, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 8, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+16, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 16, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+24, tmp);
  srlx(tmp, 32, tmp);
  or3(dst, tmp, dst);
}

void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) {
  reverse_bytes_32(src, tmp1, tmp2);
  movxtod(tmp1, dst);
}

void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) {
  movdtox(src, tmp1);
  reverse_bytes_32(tmp1, dst, tmp2);
}
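// A note on reverse_bytes_32 above: the shift/or sequence is a plain 32-bit
// byte swap carried out in a 64-bit register. In C terms (upper 32 bits of
// src assumed zero; illustrative only):
//
//   dst = ((src & 0x000000FF) << 24) |
//         ((src & 0x0000FF00) <<  8) |
//         ((src & 0x00FF0000) >>  8) |
//         ( src               >> 24);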