/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2: m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:  m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:  m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case cb_op2:  m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
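// (branch_destination below is effectively the inverse of patched_branch
//  above: for a displacement that fits its field,
//  patched_branch(branch_destination(inst, pos), inst, pos) reproduces inst.)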
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:   r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2: r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:  r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:  r = inv_wdisp(inst, pos, 22); break;
      case br_op2:  r = inv_wdisp(inst, pos, 22); break;
      case cb_op2:  r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret(bool trace) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(I7, 2 * BytesPerInstWord, G0);
  }
}

void MacroAssembler::retl(bool trace) {
  if (trace) JMP(O7, 2 * BytesPerInstWord);
  else       jmpl(O7, 2 * BytesPerInstWord, G0);
}
#endif /* PRODUCT */


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
  if (VM_Version::v9_instructions_work())  flushw();
  else                                     flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
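// (Roughly: offset = (thread >> serialize_page_shift_count) & (page_size - sizeof(int)),
//  so different threads tend to write different words of the shared page.)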
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx(s1, s2, d);
  } else {
    smul(s1, s2, d);
  }
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
  if(VM_Version::v9_instructions_work()) {
    mulx(s1, simm13a, d);
  } else {
    smul(s1, simm13a, d);
  }
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label get_psr_test;
  // Get the condition codes the V8 way.
  read_ccr_trap(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  // Compare condition codes from the V8 and V9 ways.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
  delayed()->breakpoint_trap();
  bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
  const Register s1 = G3_scratch;
  const Register s2 = G4_scratch;
  Label set_psr_test;
  // Write out the saved condition codes the V8 way
  write_ccr_trap(ccr_save, s1, s2);
  // Read back the condition codes using the V9 instruction
  rdccr(s1);
  mov(ccr_save, s2);
  // This is a test of V8 which has icc but not xcc
  // so mask off the xcc bits
  and3(s2, 0xf, s2);
  and3(s1, 0xf, s1);
  // Compare the V8 way with the V9 way.
  subcc(s2, s1, G0);
  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
  delayed()->breakpoint_trap();
  bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    rdccr(ccr_save);
    // Test code sequence used on V8. Do not move above rdccr.
    read_ccr_v8_assert(ccr_save);
  } else {
    read_ccr_trap(ccr_save);
  }
}

void MacroAssembler::write_ccr(Register ccr_save) {
  if (VM_Version::v9_instructions_work()) {
    // Test code sequence used on V8. Do not move below wrccr.
    write_ccr_v8_assert(ccr_save);
    wrccr(ccr_save);
  } else {
    const Register temp_reg1 = G3_scratch;
    const Register temp_reg2 = G4_scratch;
    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  }
}


// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    srl(G1, 0,G1);              // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    srl(G4, 0,G4);              // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible, has_last_Java_frame is true and users will look at the
  // rest of the fields.
  // (Note: flags should always be zero before we get here so it doesn't
  // need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

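  // (Convention for these runtime calls: the thread is always passed in O0,
  //  so the Java-visible arguments start at O1; see the call_VM overloads
  //  further down.)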
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
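// (Illustrative only: such a stub would write something like
//    __ call_VM(Rresult, FP, entry_point, 1);
//  where Rresult is whatever register should receive the oop result.)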


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
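// (In debug builds, set_vm_result below traps if vm_result is already
//  non-zero, which is what enforces this requirement.)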
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
  // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}


void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
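  // (The worst-case 64-bit sequence above is 7 instructions: sethi, or3,
  //  sllx, or3, sllx, or3, sllx. That matches what insts_for_sethi() reports
  //  for worst_case, so the loop below nops out to that length.)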
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
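  // (Cases below, cheapest first: a simm13 value is a single 'or'; a zero or
  //  all-ones high word needs only a sethi pair; a zero low word builds the
  //  high word and shifts it up; the general case builds both halves and
  //  merges them, up to 6 instructions.)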
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
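  // (Concretely: s1/s2 are spilled to their save slots in the current window,
  //  the 'save' is issued, and the values are then recovered into d1/d2
  //  through the new window: from memory if they had a stack home, otherwise
  //  via after_save().)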
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = oopDesc::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);

}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}


void MacroAssembler::safepoint() {
  relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}


void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print( "fp = " );   os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print( "sp = " );   os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last+1 < 32; ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last+1 < 32; ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", 2 * last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flush_windows();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );

  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr( O5_save_flags );

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass != NULL)
  br_null_short(O0_obj, pn, fail);
  // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

  wrccr( O5_save_flags ); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
  ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr( O5_save_flags ); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr( O5_save_flags ); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
  ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call( CAST_FROM_FN_PTR(address, warning) );
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();     // one more save to free up another O7 register
  mov(I0, O1);      // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);      // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address,breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if ( ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to make locks work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
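  // (Rresult = Rparam_words - n_register_parameters, clamped to zero below,
  //  since only arguments beyond the register parameters need memory words.)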
  Label no_extras;
  br( negative, true, pt, no_extras ); // if neg, clear reg
  delayed()->set(0, Rresult);          // annulled, so only if taken
  bind( no_extras );
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br (c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
#ifdef _LP64
  bpr( rc_z, a, p, s1, L );
#else
  tst(s1);
  br ( zero, a, p, L );
#endif
}

void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
  assert_not_delayed();
#ifdef _LP64
  bpr( rc_nz, a, p, s1, L );
#else
  tst(s1);
  br ( notZero, a, p, L );
#endif
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
1780 void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
1781 Predict p, Label& L) {
1782 assert_not_delayed();
1783 if (is_simm(simm13a,5) && use_cbcond(L)) {
1784 Assembler::cbcond(c, icc, s1, simm13a, L);
1785 } else {
1786 cmp(s1, simm13a);
1787 br(c, false, p, L);
1788 delayed()->nop();
1789 }
1790 }
1791 
1792 // Branch that tests xcc in LP64 and icc in !LP64
1793 void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
1794 Predict p, Label& L) {
1795 assert_not_delayed();
1796 if (use_cbcond(L)) {
1797 Assembler::cbcond(c, ptr_cc, s1, s2, L);
1798 } else {
1799 cmp(s1, s2);
1800 brx(c, false, p, L);
1801 delayed()->nop();
1802 }
1803 }
1804 
1805 // Branch that tests xcc in LP64 and icc in !LP64
1806 void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
1807 Predict p, Label& L) {
1808 assert_not_delayed();
1809 if (is_simm(simm13a,5) && use_cbcond(L)) {
1810 Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
1811 } else {
1812 cmp(s1, simm13a);
1813 brx(c, false, p, L);
1814 delayed()->nop();
1815 }
1816 }
1817 
1818 // Short branch versions for comparing a pointer with zero.
1819 
1820 void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
1821 assert_not_delayed();
1822 if (use_cbcond(L)) {
1823 Assembler::cbcond(zero, ptr_cc, s1, 0, L);
1824 return;
1825 }
1826 br_null(s1, false, p, L);
1827 delayed()->nop();
1828 }
1829 
1830 void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
1831 assert_not_delayed();
1832 if (use_cbcond(L)) {
1833 Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
1834 return;
1835 }
1836 br_notnull(s1, false, p, L);
1837 delayed()->nop();
1838 }
1839 
1840 // Unconditional short branch
1841 void MacroAssembler::ba_short(Label& L) {
1842 if (use_cbcond(L)) {
1843 Assembler::cbcond(equal, icc, G0, G0, L);
1844 return;
1845 }
1846 br(always, false, pt, L);
1847 delayed()->nop();
1848 }
1849 
1850 // Instruction sequences factored across compiler & interpreter.
1851 
1852 
1853 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
1854 Register Rb_hi, Register Rb_low,
1855 Register Rresult) {
1856 
1857 Label check_low_parts, done;
1858 
1859 cmp(Ra_hi, Rb_hi ); // compare hi parts
1860 br(equal, true, pt, check_low_parts);
1861 delayed()->cmp(Ra_low, Rb_low); // test low parts
1862 
1863 // And, with an unsigned comparison, it does not matter if the numbers
1864 // are negative or not.
1865 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
1866 // The second one is bigger (unsignedly).
1867 
1868 // Other notes: The first move in each triplet can be unconditional
1869 // (and therefore probably prefetchable).
1870 // And the equals case for the high part does not need testing,
1871 // since that triplet is reached only after finding the high halves differ.
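// Added sketch (not in the original source): the hi/low comparison below,
// restated as plain C for reference. The helper name lcmp_ref and the
// <stdint.h> types are illustrative assumptions; the -1/0/+1 result
// convention matches the code that follows.
#if 0 // illustration only, never compiled
static int lcmp_ref(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  if (a_hi != b_hi) return (a_hi < b_hi) ? -1 : 1;  // signed compare of high halves
  if (a_lo == b_lo) return 0;
  return (a_lo < b_lo) ? -1 : 1;                    // unsigned compare of low halves
}
#endif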
1872 
1873 if (VM_Version::v9_instructions_work()) {
1874 mov(-1, Rresult);
1875 ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
1876 } else {
1877 br(less, true, pt, done); delayed()-> set(-1, Rresult);
1878 br(greater, true, pt, done); delayed()-> set( 1, Rresult);
1879 }
1880 
1881 bind( check_low_parts );
1882 
1883 if (VM_Version::v9_instructions_work()) {
1884 mov( -1, Rresult);
1885 movcc(equal, false, icc, 0, Rresult);
1886 movcc(greaterUnsigned, false, icc, 1, Rresult);
1887 } else {
1888 set(-1, Rresult);
1889 br(equal, true, pt, done); delayed()->set( 0, Rresult);
1890 br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
1891 }
1892 bind( done );
1893 }
1894 
1895 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
1896 subcc( G0, Rlow, Rlow );
1897 subc( G0, Rhi, Rhi );
1898 }
1899 
1900 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
1901 Register Rcount,
1902 Register Rout_high, Register Rout_low,
1903 Register Rtemp ) {
1904 
1905 
1906 Register Ralt_count = Rtemp;
1907 Register Rxfer_bits = Rtemp;
1908 
1909 assert( Ralt_count != Rin_high
1910 && Ralt_count != Rin_low
1911 && Ralt_count != Rcount
1912 && Rxfer_bits != Rin_low
1913 && Rxfer_bits != Rin_high
1914 && Rxfer_bits != Rcount
1915 && Rxfer_bits != Rout_low
1916 && Rout_low != Rin_high,
1917 "register alias checks");
1918 
1919 Label big_shift, done;
1920 
1921 // This code can be optimized to use the 64 bit shifts in V9.
1922 // Here we use the 32 bit shifts.
1923 
1924 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
1925 subcc(Rcount, 31, Ralt_count);
1926 br(greater, true, pn, big_shift);
1927 delayed()->dec(Ralt_count);
1928 
1929 // shift < 32 bits, Ralt_count = Rcount-31
1930 
1931 // We get the transfer bits by shifting the low register right by
1932 // 32-count. This is done by shifting right by 31-count and then by one
1933 // more to take care of the special (rare) case where count is zero
1934 // (shifting by 32 would not work).
1935 
1936 neg(Ralt_count);
1937 
1938 // The order of the next two instructions is critical in the case where
1939 // Rin and Rout are the same; it must not be reversed.
1940 
1941 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
1942 if (Rcount != Rout_low) {
1943 sll(Rin_low, Rcount, Rout_low); // low half
1944 }
1945 sll(Rin_high, Rcount, Rout_high);
1946 if (Rcount == Rout_low) {
1947 sll(Rin_low, Rcount, Rout_low); // low half
1948 }
1949 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
1950 ba(done);
1951 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
1952 
1953 // shift >= 32 bits, Ralt_count = Rcount-32
1954 bind(big_shift);
1955 sll(Rin_low, Ralt_count, Rout_high );
1956 clr(Rout_low);
1957 
1958 bind(done);
1959 }
1960 
1961 
1962 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
1963 Register Rcount,
1964 Register Rout_high, Register Rout_low,
1965 Register Rtemp ) {
1966 
1967 Register Ralt_count = Rtemp;
1968 Register Rxfer_bits = Rtemp;
1969 
1970 assert( Ralt_count != Rin_high
1971 && Ralt_count != Rin_low
1972 && Ralt_count != Rcount
1973 && Rxfer_bits != Rin_low
1974 && Rxfer_bits != Rin_high
1975 && Rxfer_bits != Rcount
1976 && Rxfer_bits != Rout_high
1977 && Rout_high != Rin_low,
1978 "register alias checks");
1979 
1980 Label big_shift, done;
1981 
1982 // This code can be optimized to use the 64 bit shifts in V9.
1983 // Here we use the 32 bit shifts.
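// Added worked example (not in the original source) of the transfer-bit
// trick used throughout these double-word shifts: shifting by (31 - count)
// and then by one more is equivalent to shifting by (32 - count) while
// staying within the 0..31 range a 32-bit shifter supports. For count == 4:
// (x << 27) << 1 == x << 28 == x << (32 - 4). For the rare count == 0 case:
// (x << 31) << 1 == 0, so no bits cross between the halves, as required.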
1984 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
1985 subcc(Rcount, 31, Ralt_count);
1986 br(greater, true, pn, big_shift);
1987 delayed()->dec(Ralt_count);
1988 
1989 // shift < 32 bits, Ralt_count = Rcount-31
1990 
1991 
1992 // We get the transfer bits by shifting the high register left by
1993 // 32-count. This is done by shifting left by 31-count and then by one
1994 // more to take care of the special (rare) case where count is zero
1995 // (shifting by 32 would not work).
1996 
1997 neg(Ralt_count);
1998 if (Rcount != Rout_low) {
1999 srl(Rin_low, Rcount, Rout_low);
2000 }
2001 
2002 // The order of the next two instructions is critical in the case where
2003 // Rin and Rout are the same; it must not be reversed.
2004 
2005 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
2006 sra(Rin_high, Rcount, Rout_high ); // high half
2007 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
2008 if (Rcount == Rout_low) {
2009 srl(Rin_low, Rcount, Rout_low);
2010 }
2011 ba(done);
2012 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
2013 
2014 // shift >= 32 bits, Ralt_count = Rcount-32
2015 bind(big_shift);
2016 
2017 sra(Rin_high, Ralt_count, Rout_low);
2018 sra(Rin_high, 31, Rout_high); // sign into hi
2019 
2020 bind( done );
2021 }
2022 
2023 
2024 
2025 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
2026 Register Rcount,
2027 Register Rout_high, Register Rout_low,
2028 Register Rtemp ) {
2029 
2030 Register Ralt_count = Rtemp;
2031 Register Rxfer_bits = Rtemp;
2032 
2033 assert( Ralt_count != Rin_high
2034 && Ralt_count != Rin_low
2035 && Ralt_count != Rcount
2036 && Rxfer_bits != Rin_low
2037 && Rxfer_bits != Rin_high
2038 && Rxfer_bits != Rcount
2039 && Rxfer_bits != Rout_high
2040 && Rout_high != Rin_low,
2041 "register alias checks");
2042 
2043 Label big_shift, done;
2044 
2045 // This code can be optimized to use the 64 bit shifts in V9.
2046 // Here we use the 32 bit shifts.
2047 
2048 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
2049 subcc(Rcount, 31, Ralt_count);
2050 br(greater, true, pn, big_shift);
2051 delayed()->dec(Ralt_count);
2052 
2053 // shift < 32 bits, Ralt_count = Rcount-31
2054 
2055 // We get the transfer bits by shifting the high register left by
2056 // 32-count. This is done by shifting left by 31-count and then by one
2057 // more to take care of the special (rare) case where count is zero
2058 // (shifting by 32 would not work).
2059 
2060 neg(Ralt_count);
2061 if (Rcount != Rout_low) {
2062 srl(Rin_low, Rcount, Rout_low);
2063 }
2064 
2065 // The order of the next two instructions is critical in the case where
2066 // Rin and Rout are the same; it must not be reversed.
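// Added note (not in the original source): a concrete instance of the
// ordering constraint above. If Rout_high aliases Rin_high, emitting
// srl(Rin_high, Rcount, Rout_high) first would overwrite the original high
// half before sll(Rin_high, Ralt_count, Rxfer_bits) could extract the
// transfer bits from it, so Rxfer_bits must be computed first.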
2067 2068 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 2069 srl(Rin_high, Rcount, Rout_high ); // high half 2070 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 2071 if (Rcount == Rout_low) { 2072 srl(Rin_low, Rcount, Rout_low); 2073 } 2074 ba(done); 2075 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 2076 2077 // shift >= 32 bits, Ralt_count = Rcount-32 2078 bind(big_shift); 2079 2080 srl(Rin_high, Ralt_count, Rout_low); 2081 clr(Rout_high); 2082 2083 bind( done ); 2084 } 2085 2086 #ifdef _LP64 2087 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 2088 cmp(Ra, Rb); 2089 mov(-1, Rresult); 2090 movcc(equal, false, xcc, 0, Rresult); 2091 movcc(greater, false, xcc, 1, Rresult); 2092 } 2093 #endif 2094 2095 2096 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 2097 switch (size_in_bytes) { 2098 case 8: ld_long(src, dst); break; 2099 case 4: ld( src, dst); break; 2100 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 2101 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; 2102 default: ShouldNotReachHere(); 2103 } 2104 } 2105 2106 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 2107 switch (size_in_bytes) { 2108 case 8: st_long(src, dst); break; 2109 case 4: st( src, dst); break; 2110 case 2: sth( src, dst); break; 2111 case 1: stb( src, dst); break; 2112 default: ShouldNotReachHere(); 2113 } 2114 } 2115 2116 2117 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 2118 FloatRegister Fa, FloatRegister Fb, 2119 Register Rresult) { 2120 2121 fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); 2122 2123 Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less; 2124 Condition eq = f_equal; 2125 Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater; 2126 2127 if (VM_Version::v9_instructions_work()) { 2128 2129 mov(-1, Rresult); 2130 movcc(eq, true, fcc0, 0, Rresult); 2131 movcc(gt, true, fcc0, 1, Rresult); 2132 2133 } else { 2134 Label done; 2135 2136 set( -1, Rresult ); 2137 //fb(lt, true, pn, done); delayed()->set( -1, Rresult ); 2138 fb( eq, true, pn, done); delayed()->set( 0, Rresult ); 2139 fb( gt, true, pn, done); delayed()->set( 1, Rresult ); 2140 2141 bind (done); 2142 } 2143 } 2144 2145 2146 void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2147 { 2148 if (VM_Version::v9_instructions_work()) { 2149 Assembler::fneg(w, s, d); 2150 } else { 2151 if (w == FloatRegisterImpl::S) { 2152 Assembler::fneg(w, s, d); 2153 } else if (w == FloatRegisterImpl::D) { 2154 // number() does a sanity check on the alignment. 2155 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2156 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2157 2158 Assembler::fneg(FloatRegisterImpl::S, s, d); 2159 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2160 } else { 2161 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2162 2163 // number() does a sanity check on the alignment. 
2164 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 2165 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 2166 2167 Assembler::fneg(FloatRegisterImpl::S, s, d); 2168 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2169 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 2170 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 2171 } 2172 } 2173 } 2174 2175 void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2176 { 2177 if (VM_Version::v9_instructions_work()) { 2178 Assembler::fmov(w, s, d); 2179 } else { 2180 if (w == FloatRegisterImpl::S) { 2181 Assembler::fmov(w, s, d); 2182 } else if (w == FloatRegisterImpl::D) { 2183 // number() does a sanity check on the alignment. 2184 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2185 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2186 2187 Assembler::fmov(FloatRegisterImpl::S, s, d); 2188 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2189 } else { 2190 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2191 2192 // number() does a sanity check on the alignment. 2193 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 2194 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 2195 2196 Assembler::fmov(FloatRegisterImpl::S, s, d); 2197 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2198 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 2199 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 2200 } 2201 } 2202 } 2203 2204 void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 2205 { 2206 if (VM_Version::v9_instructions_work()) { 2207 Assembler::fabs(w, s, d); 2208 } else { 2209 if (w == FloatRegisterImpl::S) { 2210 Assembler::fabs(w, s, d); 2211 } else if (w == FloatRegisterImpl::D) { 2212 // number() does a sanity check on the alignment. 2213 assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 2214 ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 2215 2216 Assembler::fabs(FloatRegisterImpl::S, s, d); 2217 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 2218 } else { 2219 assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 2220 2221 // number() does a sanity check on the alignment. 
2222 assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
2223 ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
2224 
2225 Assembler::fabs(FloatRegisterImpl::S, s, d);
2226 Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
2227 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
2228 Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
2229 }
2230 }
2231 }
2232 
2233 void MacroAssembler::save_all_globals_into_locals() {
2234 mov(G1,L1);
2235 mov(G2,L2);
2236 mov(G3,L3);
2237 mov(G4,L4);
2238 mov(G5,L5);
2239 mov(G6,L6);
2240 mov(G7,L7);
2241 }
2242 
2243 void MacroAssembler::restore_globals_from_locals() {
2244 mov(L1,G1);
2245 mov(L2,G2);
2246 mov(L3,G3);
2247 mov(L4,G4);
2248 mov(L5,G5);
2249 mov(L6,G6);
2250 mov(L7,G7);
2251 }
2252 
2253 // Use for 64-bit operations.
2254 void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2255 {
2256 // store ptr_reg as the new top value
2257 #ifdef _LP64
2258 casx(top_ptr_reg, top_reg, ptr_reg);
2259 #else
2260 cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
2261 #endif // _LP64
2262 }
2263 
2264 // [RGV] This routine does not handle 64-bit operations.
2265 // Use casx_under_lock() or casx directly!
2266 void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
2267 {
2268 // store ptr_reg as the new top value
2269 if (VM_Version::v9_instructions_work()) {
2270 cas(top_ptr_reg, top_reg, ptr_reg);
2271 } else {
2272 
2273 // If the register is neither an out nor a global, it is not visible
2274 // after the save. Allocate a register for it, save its
2275 // value in the register save area (the save may not flush
2276 // registers to the save area).
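// Added sketch (not in the original source): the V8 emulation protocol the
// remainder of this routine implements, restated as pseudo-C. Names are
// illustrative only.
#if 0 // illustration only, never compiled
while (swap(lock_addr, locked) != unlocked) // spin; yield periodically
  maybe_yield();
value = *top_ptr;                           // lock held: emulate the CAS
if (value == top) *top_ptr = ptr;           // success: install the new top
else              ptr = value;              // failure: report witnessed value
*lock_addr = unlocked;                      // release the spin lock
#endif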
2277 
2278 Register top_ptr_reg_after_save;
2279 Register top_reg_after_save;
2280 Register ptr_reg_after_save;
2281 
2282 if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
2283 top_ptr_reg_after_save = top_ptr_reg->after_save();
2284 } else {
2285 Address reg_save_addr = top_ptr_reg->address_in_saved_window();
2286 top_ptr_reg_after_save = L0;
2287 st(top_ptr_reg, reg_save_addr);
2288 }
2289 
2290 if (top_reg->is_out() || top_reg->is_global()) {
2291 top_reg_after_save = top_reg->after_save();
2292 } else {
2293 Address reg_save_addr = top_reg->address_in_saved_window();
2294 top_reg_after_save = L1;
2295 st(top_reg, reg_save_addr);
2296 }
2297 
2298 if (ptr_reg->is_out() || ptr_reg->is_global()) {
2299 ptr_reg_after_save = ptr_reg->after_save();
2300 } else {
2301 Address reg_save_addr = ptr_reg->address_in_saved_window();
2302 ptr_reg_after_save = L2;
2303 st(ptr_reg, reg_save_addr);
2304 }
2305 
2306 const Register& lock_reg = L3;
2307 const Register& lock_ptr_reg = L4;
2308 const Register& value_reg = L5;
2309 const Register& yield_reg = L6;
2310 const Register& yieldall_reg = L7;
2311 
2312 save_frame();
2313 
2314 if (top_ptr_reg_after_save == L0) {
2315 ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
2316 }
2317 
2318 if (top_reg_after_save == L1) {
2319 ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
2320 }
2321 
2322 if (ptr_reg_after_save == L2) {
2323 ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
2324 }
2325 
2326 Label retry_get_lock;
2327 Label not_same;
2328 Label dont_yield;
2329 
2330 assert(lock_addr, "lock_address should be non-null for v8");
2331 set((intptr_t)lock_addr, lock_ptr_reg);
2332 // Initialize yield counter
2333 mov(G0,yield_reg);
2334 mov(G0, yieldall_reg);
2335 set(StubRoutines::Sparc::locked, lock_reg);
2336 
2337 bind(retry_get_lock);
2338 cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
2339 
2340 if (use_call_vm) {
2341 Untested("Need to verify global reg consistency");
2342 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
2343 } else {
2344 // Save the regs and make space for a C call
2345 save(SP, -96, SP);
2346 save_all_globals_into_locals();
2347 call(CAST_FROM_FN_PTR(address,os::yield_all));
2348 delayed()->mov(yieldall_reg, O0);
2349 restore_globals_from_locals();
2350 restore();
2351 }
2352 
2353 // reset the counter
2354 mov(G0,yield_reg);
2355 add(yieldall_reg, 1, yieldall_reg);
2356 
2357 bind(dont_yield);
2358 // try to get lock
2359 Assembler::swap(lock_ptr_reg, 0, lock_reg);
2360 
2361 // did we get the lock?
2362 cmp(lock_reg, StubRoutines::Sparc::unlocked);
2363 br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
2364 delayed()->add(yield_reg,1,yield_reg);
2365 
2366 // yes, got lock. do we have the same top?
2367 ld(top_ptr_reg_after_save, 0, value_reg);
2368 cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
2369 
2370 // yes, same top.
2371 st(ptr_reg_after_save, top_ptr_reg_after_save, 0); 2372 membar(Assembler::StoreStore); 2373 2374 bind(not_same); 2375 mov(value_reg, ptr_reg_after_save); 2376 st(lock_reg, lock_ptr_reg, 0); // unlock 2377 2378 restore(); 2379 } 2380 } 2381 2382 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 2383 Register tmp, 2384 int offset) { 2385 intptr_t value = *delayed_value_addr; 2386 if (value != 0) 2387 return RegisterOrConstant(value + offset); 2388 2389 // load indirectly to solve generation ordering problem 2390 AddressLiteral a(delayed_value_addr); 2391 load_ptr_contents(a, tmp); 2392 2393 #ifdef ASSERT 2394 tst(tmp); 2395 breakpoint_trap(zero, xcc); 2396 #endif 2397 2398 if (offset != 0) 2399 add(tmp, offset, tmp); 2400 2401 return RegisterOrConstant(tmp); 2402 } 2403 2404 2405 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2406 assert(d.register_or_noreg() != G0, "lost side effect"); 2407 if ((s2.is_constant() && s2.as_constant() == 0) || 2408 (s2.is_register() && s2.as_register() == G0)) { 2409 // Do nothing, just move value. 2410 if (s1.is_register()) { 2411 if (d.is_constant()) d = temp; 2412 mov(s1.as_register(), d.as_register()); 2413 return d; 2414 } else { 2415 return s1; 2416 } 2417 } 2418 2419 if (s1.is_register()) { 2420 assert_different_registers(s1.as_register(), temp); 2421 if (d.is_constant()) d = temp; 2422 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2423 return d; 2424 } else { 2425 if (s2.is_register()) { 2426 assert_different_registers(s2.as_register(), temp); 2427 if (d.is_constant()) d = temp; 2428 set(s1.as_constant(), temp); 2429 andn(temp, s2.as_register(), d.as_register()); 2430 return d; 2431 } else { 2432 intptr_t res = s1.as_constant() & ~s2.as_constant(); 2433 return res; 2434 } 2435 } 2436 } 2437 2438 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2439 assert(d.register_or_noreg() != G0, "lost side effect"); 2440 if ((s2.is_constant() && s2.as_constant() == 0) || 2441 (s2.is_register() && s2.as_register() == G0)) { 2442 // Do nothing, just move value. 2443 if (s1.is_register()) { 2444 if (d.is_constant()) d = temp; 2445 mov(s1.as_register(), d.as_register()); 2446 return d; 2447 } else { 2448 return s1; 2449 } 2450 } 2451 2452 if (s1.is_register()) { 2453 assert_different_registers(s1.as_register(), temp); 2454 if (d.is_constant()) d = temp; 2455 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2456 return d; 2457 } else { 2458 if (s2.is_register()) { 2459 assert_different_registers(s2.as_register(), temp); 2460 if (d.is_constant()) d = temp; 2461 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2462 return d; 2463 } else { 2464 intptr_t res = s1.as_constant() + s2.as_constant(); 2465 return res; 2466 } 2467 } 2468 } 2469 2470 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2471 assert(d.register_or_noreg() != G0, "lost side effect"); 2472 if (!is_simm13(s2.constant_or_zero())) 2473 s2 = (s2.as_constant() & 0xFF); 2474 if ((s2.is_constant() && s2.as_constant() == 0) || 2475 (s2.is_register() && s2.as_register() == G0)) { 2476 // Do nothing, just move value. 
2477 if (s1.is_register()) { 2478 if (d.is_constant()) d = temp; 2479 mov(s1.as_register(), d.as_register()); 2480 return d; 2481 } else { 2482 return s1; 2483 } 2484 } 2485 2486 if (s1.is_register()) { 2487 assert_different_registers(s1.as_register(), temp); 2488 if (d.is_constant()) d = temp; 2489 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2490 return d; 2491 } else { 2492 if (s2.is_register()) { 2493 assert_different_registers(s2.as_register(), temp); 2494 if (d.is_constant()) d = temp; 2495 set(s1.as_constant(), temp); 2496 sll_ptr(temp, s2.as_register(), d.as_register()); 2497 return d; 2498 } else { 2499 intptr_t res = s1.as_constant() << s2.as_constant(); 2500 return res; 2501 } 2502 } 2503 } 2504 2505 2506 // Look up the method for a megamorphic invokeinterface call. 2507 // The target method is determined by <intf_klass, itable_index>. 2508 // The receiver klass is in recv_klass. 2509 // On success, the result will be in method_result, and execution falls through. 2510 // On failure, execution transfers to the given label. 2511 void MacroAssembler::lookup_interface_method(Register recv_klass, 2512 Register intf_klass, 2513 RegisterOrConstant itable_index, 2514 Register method_result, 2515 Register scan_temp, 2516 Register sethi_temp, 2517 Label& L_no_such_interface) { 2518 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2519 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 2520 "caller must use same register for non-constant itable index as for method"); 2521 2522 Label L_no_such_interface_restore; 2523 bool did_save = false; 2524 if (scan_temp == noreg || sethi_temp == noreg) { 2525 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2526 Register intf_2 = intf_klass->is_global() ? intf_klass : L1; 2527 assert(method_result->is_global(), "must be able to return value"); 2528 scan_temp = L2; 2529 sethi_temp = L3; 2530 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2531 recv_klass = recv_2; 2532 intf_klass = intf_2; 2533 did_save = true; 2534 } 2535 2536 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2537 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; 2538 int scan_step = itableOffsetEntry::size() * wordSize; 2539 int vte_size = vtableEntry::size() * wordSize; 2540 2541 lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp); 2542 // %%% We should store the aligned, prescaled offset in the klassoop. 2543 // Then the next several instructions would fold away. 2544 2545 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0); 2546 int itb_offset = vtable_base; 2547 if (round_to_unit != 0) { 2548 // hoist first instruction of round_to(scan_temp, BytesPerLong): 2549 itb_offset += round_to_unit - wordSize; 2550 } 2551 int itb_scale = exact_log2(vtableEntry::size() * wordSize); 2552 sll(scan_temp, itb_scale, scan_temp); 2553 add(scan_temp, itb_offset, scan_temp); 2554 if (round_to_unit != 0) { 2555 // Round up to align_object_offset boundary 2556 // see code for InstanceKlass::start_of_itable! 2557 // Was: round_to(scan_temp, BytesPerLong); 2558 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp); 2559 and3(scan_temp, -round_to_unit, scan_temp); 2560 } 2561 add(recv_klass, scan_temp, scan_temp); 2562 2563 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
2564 RegisterOrConstant itable_offset = itable_index; 2565 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2566 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2567 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2568 2569 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2570 // if (scan->interface() == intf) { 2571 // result = (klass + scan->offset() + itable_index); 2572 // } 2573 // } 2574 Label L_search, L_found_method; 2575 2576 for (int peel = 1; peel >= 0; peel--) { 2577 // %%%% Could load both offset and interface in one ldx, if they were 2578 // in the opposite order. This would save a load. 2579 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2580 2581 // Check that this entry is non-null. A null entry means that 2582 // the receiver class doesn't implement the interface, and wasn't the 2583 // same as when the caller was compiled. 2584 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2585 delayed()->cmp(method_result, intf_klass); 2586 2587 if (peel) { 2588 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2589 } else { 2590 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2591 // (invert the test to fall through to found_method...) 2592 } 2593 delayed()->add(scan_temp, scan_step, scan_temp); 2594 2595 if (!peel) break; 2596 2597 bind(L_search); 2598 } 2599 2600 bind(L_found_method); 2601 2602 // Got a hit. 2603 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2604 // scan_temp[-scan_step] points to the vtable offset we need 2605 ito_offset -= scan_step; 2606 lduw(scan_temp, ito_offset, scan_temp); 2607 ld_ptr(recv_klass, scan_temp, method_result); 2608 2609 if (did_save) { 2610 Label L_done; 2611 ba(L_done); 2612 delayed()->restore(); 2613 2614 bind(L_no_such_interface_restore); 2615 ba(L_no_such_interface); 2616 delayed()->restore(); 2617 2618 bind(L_done); 2619 } 2620 } 2621 2622 2623 // virtual method calling 2624 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2625 RegisterOrConstant vtable_index, 2626 Register method_result) { 2627 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2628 Register sethi_temp = method_result; 2629 const int base = (InstanceKlass::vtable_start_offset() * wordSize + 2630 // method pointer offset within the vtable entry: 2631 vtableEntry::method_offset_in_bytes()); 2632 RegisterOrConstant vtable_offset = vtable_index; 2633 // Each of the following three lines potentially generates an instruction. 2634 // But the total number of address formation instructions will always be 2635 // at most two, and will often be zero. In any case, it will be optimal. 2636 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2637 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 
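// Added example (not in the original source): if vtable_index is the
// constant 5, the three lines below fold to the single constant offset
// base + 5 * (vtableEntry::size() * wordSize), and the load becomes one
// ld_ptr (with a set via sethi_temp only if that offset exceeds simm13).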
2638 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset); 2639 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2640 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2641 ld_ptr(vtable_entry_addr, method_result); 2642 } 2643 2644 2645 void MacroAssembler::check_klass_subtype(Register sub_klass, 2646 Register super_klass, 2647 Register temp_reg, 2648 Register temp2_reg, 2649 Label& L_success) { 2650 Register sub_2 = sub_klass; 2651 Register sup_2 = super_klass; 2652 if (!sub_2->is_global()) sub_2 = L0; 2653 if (!sup_2->is_global()) sup_2 = L1; 2654 bool did_save = false; 2655 if (temp_reg == noreg || temp2_reg == noreg) { 2656 temp_reg = L2; 2657 temp2_reg = L3; 2658 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2659 sub_klass = sub_2; 2660 super_klass = sup_2; 2661 did_save = true; 2662 } 2663 Label L_failure, L_pop_to_failure, L_pop_to_success; 2664 check_klass_subtype_fast_path(sub_klass, super_klass, 2665 temp_reg, temp2_reg, 2666 (did_save ? &L_pop_to_success : &L_success), 2667 (did_save ? &L_pop_to_failure : &L_failure), NULL); 2668 2669 if (!did_save) 2670 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2671 check_klass_subtype_slow_path(sub_2, sup_2, 2672 L2, L3, L4, L5, 2673 NULL, &L_pop_to_failure); 2674 2675 // on success: 2676 bind(L_pop_to_success); 2677 restore(); 2678 ba_short(L_success); 2679 2680 // on failure: 2681 bind(L_pop_to_failure); 2682 restore(); 2683 bind(L_failure); 2684 } 2685 2686 2687 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2688 Register super_klass, 2689 Register temp_reg, 2690 Register temp2_reg, 2691 Label* L_success, 2692 Label* L_failure, 2693 Label* L_slow_path, 2694 RegisterOrConstant super_check_offset) { 2695 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2696 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2697 2698 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2699 bool need_slow_path = (must_load_sco || 2700 super_check_offset.constant_or_zero() == sco_offset); 2701 2702 assert_different_registers(sub_klass, super_klass, temp_reg); 2703 if (super_check_offset.is_register()) { 2704 assert_different_registers(sub_klass, super_klass, temp_reg, 2705 super_check_offset.as_register()); 2706 } else if (must_load_sco) { 2707 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2708 } 2709 2710 Label L_fallthrough; 2711 int label_nulls = 0; 2712 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2713 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2714 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2715 assert(label_nulls <= 1 || 2716 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2717 "at most one NULL in the batch, usually"); 2718 2719 // If the pointers are equal, we are done (e.g., String[] elements). 2720 // This self-check enables sharing of secondary supertype arrays among 2721 // non-primary types such as array-of-interface. Otherwise, each such 2722 // type would need its own customized SSA. 2723 // We move this check to the front of the fast path because many 2724 // type checks are in fact trivially successful in this manner, 2725 // so we get a nicely predicted branch right at the start of the check. 
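// Added example (not in the original source): an aastore of a String into
// a String[] checks the String klass against the array's element klass,
// so sub_klass == super_klass and the comparison below succeeds without
// consulting the supertype display at all.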
2726 cmp(super_klass, sub_klass); 2727 brx(Assembler::equal, false, Assembler::pn, *L_success); 2728 delayed()->nop(); 2729 2730 // Check the supertype display: 2731 if (must_load_sco) { 2732 // The super check offset is always positive... 2733 lduw(super_klass, sco_offset, temp2_reg); 2734 super_check_offset = RegisterOrConstant(temp2_reg); 2735 // super_check_offset is register. 2736 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2737 } 2738 ld_ptr(sub_klass, super_check_offset, temp_reg); 2739 cmp(super_klass, temp_reg); 2740 2741 // This check has worked decisively for primary supers. 2742 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2743 // (Secondary supers are interfaces and very deeply nested subtypes.) 2744 // This works in the same check above because of a tricky aliasing 2745 // between the super_cache and the primary super display elements. 2746 // (The 'super_check_addr' can address either, as the case requires.) 2747 // Note that the cache is updated below if it does not help us find 2748 // what we need immediately. 2749 // So if it was a primary super, we can just fail immediately. 2750 // Otherwise, it's the slow path for us (no success at this point). 2751 2752 // Hacked ba(), which may only be used just before L_fallthrough. 2753 #define FINAL_JUMP(label) \ 2754 if (&(label) != &L_fallthrough) { \ 2755 ba(label); delayed()->nop(); \ 2756 } 2757 2758 if (super_check_offset.is_register()) { 2759 brx(Assembler::equal, false, Assembler::pn, *L_success); 2760 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2761 2762 if (L_failure == &L_fallthrough) { 2763 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2764 delayed()->nop(); 2765 } else { 2766 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2767 delayed()->nop(); 2768 FINAL_JUMP(*L_slow_path); 2769 } 2770 } else if (super_check_offset.as_constant() == sc_offset) { 2771 // Need a slow path; fast failure is impossible. 2772 if (L_slow_path == &L_fallthrough) { 2773 brx(Assembler::equal, false, Assembler::pt, *L_success); 2774 delayed()->nop(); 2775 } else { 2776 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2777 delayed()->nop(); 2778 FINAL_JUMP(*L_success); 2779 } 2780 } else { 2781 // No slow path; it's a fast decision. 
2782 if (L_failure == &L_fallthrough) {
2783 brx(Assembler::equal, false, Assembler::pt, *L_success);
2784 delayed()->nop();
2785 } else {
2786 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
2787 delayed()->nop();
2788 FINAL_JUMP(*L_success);
2789 }
2790 
2791 
2792 bind(L_fallthrough);
2793 
2794 #undef FINAL_JUMP
2795 }
2796 
2797 
2798 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
2799 Register super_klass,
2800 Register count_temp,
2801 Register scan_temp,
2802 Register scratch_reg,
2803 Register coop_reg,
2804 Label* L_success,
2805 Label* L_failure) {
2806 assert_different_registers(sub_klass, super_klass,
2807 count_temp, scan_temp, scratch_reg, coop_reg);
2808 
2809 Label L_fallthrough, L_loop;
2810 int label_nulls = 0;
2811 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
2812 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
2813 assert(label_nulls <= 1, "at most one NULL in the batch");
2814 
2815 // a couple of useful fields in sub_klass:
2816 int ss_offset = in_bytes(Klass::secondary_supers_offset());
2817 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2818 
2819 // Do a linear scan of the secondary super-klass chain.
2820 // This code is rarely used, so simplicity is a virtue here.
2821 
2822 #ifndef PRODUCT
2823 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
2824 inc_counter((address) pst_counter, count_temp, scan_temp);
2825 #endif
2826 
2827 // We will consult the secondary-super array.
2828 ld_ptr(sub_klass, ss_offset, scan_temp);
2829 
2830 Register search_key = super_klass;
2831 
2832 // Load the array length. (Positive movl does the right thing on LP64.)
2833 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
2834 
2835 // Check for empty secondary super list
2836 tst(count_temp);
2837 
2838 // Elements in the array of super classes are pointer-sized.
2839 int element_size = wordSize;
2840 
2841 // Top of search loop
2842 bind(L_loop);
2843 br(Assembler::equal, false, Assembler::pn, *L_failure);
2844 delayed()->add(scan_temp, element_size, scan_temp);
2845 
2846 // Skip the array header in all array accesses.
2847 int elem_offset = Array<Klass*>::base_offset_in_bytes();
2848 elem_offset -= element_size; // the scan pointer was pre-incremented also
2849 
2850 // Load next super to check
2851 ld_ptr( scan_temp, elem_offset, scratch_reg );
2852 
2853 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
2854 cmp(scratch_reg, search_key);
2855 
2856 // A miss means we are NOT a subtype and need to keep looping
2857 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
2858 delayed()->deccc(count_temp); // decrement trip counter in delay slot
2859 
2860 // Success. Cache the super we found and proceed in triumph.
2861 st_ptr(super_klass, sub_klass, sc_offset);
2862 
2863 if (L_success != &L_fallthrough) {
2864 ba(*L_success);
2865 delayed()->nop();
2866 }
2867 
2868 bind(L_fallthrough);
2869 }
2870 
2871 
2872 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
2873 Register temp_reg,
2874 int extra_slot_offset) {
2875 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
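// Added example (not in the original source), assuming
// Interpreter::stackElementSize == wordSize == 8 (LP64): with
// extra_slot_offset == 1 and a constant arg_slot of 2, the result is the
// constant (1 + 2) * 8 = 24; with a register arg_slot the code instead
// emits sll_ptr(slot, 3, temp) followed by add(temp, 8, temp).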
2876 int stackElementSize = Interpreter::stackElementSize; 2877 int offset = extra_slot_offset * stackElementSize; 2878 if (arg_slot.is_constant()) { 2879 offset += arg_slot.as_constant() * stackElementSize; 2880 return offset; 2881 } else { 2882 assert(temp_reg != noreg, "must specify"); 2883 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2884 if (offset != 0) 2885 add(temp_reg, offset, temp_reg); 2886 return temp_reg; 2887 } 2888 } 2889 2890 2891 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2892 Register temp_reg, 2893 int extra_slot_offset) { 2894 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2895 } 2896 2897 2898 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2899 Register temp_reg, 2900 Label& done, Label* slow_case, 2901 BiasedLockingCounters* counters) { 2902 assert(UseBiasedLocking, "why call this otherwise?"); 2903 2904 if (PrintBiasedLockingStatistics) { 2905 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2906 if (counters == NULL) 2907 counters = BiasedLocking::counters(); 2908 } 2909 2910 Label cas_label; 2911 2912 // Biased locking 2913 // See whether the lock is currently biased toward our thread and 2914 // whether the epoch is still valid 2915 // Note that the runtime guarantees sufficient alignment of JavaThread 2916 // pointers to allow age to be placed into low bits 2917 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2918 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2919 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2920 2921 load_klass(obj_reg, temp_reg); 2922 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2923 or3(G2_thread, temp_reg, temp_reg); 2924 xor3(mark_reg, temp_reg, temp_reg); 2925 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2926 if (counters != NULL) { 2927 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2928 // Reload mark_reg as we may need it later 2929 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2930 } 2931 brx(Assembler::equal, true, Assembler::pt, done); 2932 delayed()->nop(); 2933 2934 Label try_revoke_bias; 2935 Label try_rebias; 2936 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2937 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2938 2939 // At this point we know that the header has the bias pattern and 2940 // that we are not the bias owner in the current epoch. We need to 2941 // figure out more details about the state of the header in order to 2942 // know what operations can be legally performed on the object's 2943 // header. 2944 2945 // If the low three bits in the xor result aren't clear, that means 2946 // the prototype header is no longer biased and we have to revoke 2947 // the bias on this object. 2948 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 2949 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2950 2951 // Biasing is still enabled for this data type. See whether the 2952 // epoch of the current bias is still valid, meaning that the epoch 2953 // bits of the mark word are equal to the epoch bits of the 2954 // prototype header. (Note that the prototype header's epoch bits 2955 // only change at a safepoint.) 
If not, attempt to rebias the object 2956 // toward the current thread. Note that we must be absolutely sure 2957 // that the current epoch is invalid in order to do this because 2958 // otherwise the manipulations it performs on the mark word are 2959 // illegal. 2960 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 2961 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2962 2963 // The epoch of the current bias is still valid but we know nothing 2964 // about the owner; it might be set or it might be clear. Try to 2965 // acquire the bias of the object using an atomic operation. If this 2966 // fails we will go in to the runtime to revoke the object's bias. 2967 // Note that we first construct the presumed unbiased header so we 2968 // don't accidentally blow away another thread's valid bias. 2969 delayed()->and3(mark_reg, 2970 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 2971 mark_reg); 2972 or3(G2_thread, mark_reg, temp_reg); 2973 casn(mark_addr.base(), mark_reg, temp_reg); 2974 // If the biasing toward our thread failed, this means that 2975 // another thread succeeded in biasing it toward itself and we 2976 // need to revoke that bias. The revocation will occur in the 2977 // interpreter runtime in the slow case. 2978 cmp(mark_reg, temp_reg); 2979 if (counters != NULL) { 2980 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2981 } 2982 if (slow_case != NULL) { 2983 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2984 delayed()->nop(); 2985 } 2986 ba_short(done); 2987 2988 bind(try_rebias); 2989 // At this point we know the epoch has expired, meaning that the 2990 // current "bias owner", if any, is actually invalid. Under these 2991 // circumstances _only_, we are allowed to use the current header's 2992 // value as the comparison value when doing the cas to acquire the 2993 // bias in the current epoch. In other words, we allow transfer of 2994 // the bias from one thread to another directly in this situation. 2995 // 2996 // FIXME: due to a lack of registers we currently blow away the age 2997 // bits in this situation. Should attempt to preserve them. 2998 load_klass(obj_reg, temp_reg); 2999 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 3000 or3(G2_thread, temp_reg, temp_reg); 3001 casn(mark_addr.base(), mark_reg, temp_reg); 3002 // If the biasing toward our thread failed, this means that 3003 // another thread succeeded in biasing it toward itself and we 3004 // need to revoke that bias. The revocation will occur in the 3005 // interpreter runtime in the slow case. 3006 cmp(mark_reg, temp_reg); 3007 if (counters != NULL) { 3008 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 3009 } 3010 if (slow_case != NULL) { 3011 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 3012 delayed()->nop(); 3013 } 3014 ba_short(done); 3015 3016 bind(try_revoke_bias); 3017 // The prototype mark in the klass doesn't have the bias bit set any 3018 // more, indicating that objects of this data type are not supposed 3019 // to be biased any more. We are going to try to reset the mark of 3020 // this object to the prototype value and fall through to the 3021 // CAS-based locking scheme. 
Note that if our CAS fails, it means
3022 // that another thread raced us for the privilege of revoking the
3023 // bias of this particular object, so it's okay to continue in the
3024 // normal locking code.
3025 //
3026 // FIXME: due to a lack of registers we currently blow away the age
3027 // bits in this situation. Should attempt to preserve them.
3028 load_klass(obj_reg, temp_reg);
3029 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
3030 casn(mark_addr.base(), mark_reg, temp_reg);
3031 // Fall through to the normal CAS-based lock, because no matter what
3032 // the result of the above CAS, some thread must have succeeded in
3033 // removing the bias bit from the object's header.
3034 if (counters != NULL) {
3035 cmp(mark_reg, temp_reg);
3036 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
3037 }
3038 
3039 bind(cas_label);
3040 }
3041 
3042 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
3043 bool allow_delay_slot_filling) {
3044 // Check for biased locking unlock case, which is a no-op
3045 // Note: we do not have to check the thread ID for two reasons.
3046 // First, the interpreter checks for IllegalMonitorStateException at
3047 // a higher level. Second, if the bias was revoked while we held the
3048 // lock, the object could not be rebiased toward another thread, so
3049 // the bias bit would be clear.
3050 ld_ptr(mark_addr, temp_reg);
3051 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
3052 cmp(temp_reg, markOopDesc::biased_lock_pattern);
3053 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
3054 delayed();
3055 if (!allow_delay_slot_filling) {
3056 nop();
3057 }
3058 }
3059 
3060 
3061 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
3062 // Solaris/SPARC's "as". Another apt name would be cas_ptr().
3063 
3064 void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
3065 casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3066 }
3067 
3068 
3069 
3070 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
3071 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
3072 // The code could be tightened up considerably.
3073 //
3074 // box->dhw disposition - post-conditions at DONE_LABEL.
3075 // - Successful inflated lock: box->dhw != 0.
3076 // Any non-zero value suffices.
3077 // Consider G2_thread, rsp, boxReg, or unused_mark()
3078 // - Successful Stack-lock: box->dhw == mark.
3079 // box->dhw must contain the displaced mark word value
3080 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
3081 // The slow-path fast_enter() and slow_enter() operators
3082 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
3083 // - Biased: box->dhw is undefined
3084 //
3085 // SPARC refworkload performance - specifically jetstream and scimark - is
3086 // extremely sensitive to the size of the code emitted by compiler_lock_object
3087 // and compiler_unlock_object. Critically, the key factor is code size, not path
3088 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
3089 // effect.)
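// Added reference (not in the original source): the mark-word low-bit
// patterns this locking code triages, as conventionally documented for
// this generation of HotSpot (stated here as an assumption):
//   ..01  neutral/unlocked   (markOopDesc::unlocked_value)
//   ..00  stack-locked       (mark holds the address of an on-stack BasicLock)
//   ..10  inflated           (mark points at an ObjectMonitor; hence the
//                             andcc(Rmark, 2, G0) tests below)
//   .101  biased             (markOopDesc::biased_lock_pattern)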
3090 3091 3092 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 3093 Register Rbox, Register Rscratch, 3094 BiasedLockingCounters* counters, 3095 bool try_bias) { 3096 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 3097 3098 verify_oop(Roop); 3099 Label done ; 3100 3101 if (counters != NULL) { 3102 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 3103 } 3104 3105 if (EmitSync & 1) { 3106 mov(3, Rscratch); 3107 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3108 cmp(SP, G0); 3109 return ; 3110 } 3111 3112 if (EmitSync & 2) { 3113 3114 // Fetch object's markword 3115 ld_ptr(mark_addr, Rmark); 3116 3117 if (try_bias) { 3118 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 3119 } 3120 3121 // Save Rbox in Rscratch to be used for the cas operation 3122 mov(Rbox, Rscratch); 3123 3124 // set Rmark to markOop | markOopDesc::unlocked_value 3125 or3(Rmark, markOopDesc::unlocked_value, Rmark); 3126 3127 // Initialize the box. (Must happen before we update the object mark!) 3128 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3129 3130 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 3131 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3132 casx_under_lock(mark_addr.base(), Rmark, Rscratch, 3133 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3134 3135 // if compare/exchange succeeded we found an unlocked object and we now have locked it 3136 // hence we are done 3137 cmp(Rmark, Rscratch); 3138 #ifdef _LP64 3139 sub(Rscratch, STACK_BIAS, Rscratch); 3140 #endif 3141 brx(Assembler::equal, false, Assembler::pt, done); 3142 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot 3143 3144 // we did not find an unlocked object so see if this is a recursive case 3145 // sub(Rscratch, SP, Rscratch); 3146 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 3147 andcc(Rscratch, 0xfffff003, Rscratch); 3148 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3149 bind (done); 3150 return ; 3151 } 3152 3153 Label Egress ; 3154 3155 if (EmitSync & 256) { 3156 Label IsInflated ; 3157 3158 ld_ptr(mark_addr, Rmark); // fetch obj->mark 3159 // Triage: biased, stack-locked, neutral, inflated 3160 if (try_bias) { 3161 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 3162 // Invariant: if control reaches this point in the emitted stream 3163 // then Rmark has not been modified. 3164 } 3165 3166 // Store mark into displaced mark field in the on-stack basic-lock "box" 3167 // Critically, this must happen before the CAS 3168 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. 3169 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 3170 andcc(Rmark, 2, G0); 3171 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 3172 delayed()-> 3173 3174 // Try stack-lock acquisition. 3175 // Beware: the 1st instruction is in a delay slot 3176 mov(Rbox, Rscratch); 3177 or3(Rmark, markOopDesc::unlocked_value, Rmark); 3178 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 3179 casn(mark_addr.base(), Rmark, Rscratch); 3180 cmp(Rmark, Rscratch); 3181 brx(Assembler::equal, false, Assembler::pt, done); 3182 delayed()->sub(Rscratch, SP, Rscratch); 3183 3184 // Stack-lock attempt failed - check for recursive stack-lock. 3185 // See the comments below about how we might remove this case. 
3186 #ifdef _LP64
3187 sub(Rscratch, STACK_BIAS, Rscratch);
3188 #endif
3189 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3190 andcc(Rscratch, 0xfffff003, Rscratch);
3191 br(Assembler::always, false, Assembler::pt, done);
3192 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3193 
3194 bind(IsInflated);
3195 if (EmitSync & 64) {
3196 // If m->owner != null goto IsLocked
3197 // Pessimistic form: Test-and-CAS vs CAS
3198 // The optimistic form avoids RTS->RTO cache line upgrades.
3199 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3200 andcc(Rscratch, Rscratch, G0);
3201 brx(Assembler::notZero, false, Assembler::pn, done);
3202 delayed()->nop();
3203 // m->owner == null : it's unlocked.
3204 }
3205 
3206 // Try to CAS m->owner from null to Self
3207 // Invariant: if we acquire the lock then _recursions should be 0.
3208 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3209 mov(G2_thread, Rscratch);
3210 casn(Rmark, G0, Rscratch);
3211 cmp(Rscratch, G0);
3212 // Intentional fall-through into done
3213 } else {
3214 // Aggressively avoid the Store-before-CAS penalty
3215 // Defer the store into box->dhw until after the CAS
3216 Label IsInflated, Recursive ;
3217 
3218 // Anticipate CAS -- Avoid RTS->RTO upgrade
3219 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3220 
3221 ld_ptr(mark_addr, Rmark); // fetch obj->mark
3222 // Triage: biased, stack-locked, neutral, inflated
3223 
3224 if (try_bias) {
3225 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
3226 // Invariant: if control reaches this point in the emitted stream
3227 // then Rmark has not been modified.
3228 }
3229 andcc(Rmark, 2, G0);
3230 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
3231 delayed()-> // Beware - dangling delay-slot
3232 
3233 // Try stack-lock acquisition.
3234 // Transiently install BUSY (0) encoding in the mark word.
3235 // if the CAS of 0 into the mark was successful then we execute:
3236 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
3237 // ST obj->mark = box -- overwrite transient 0 value
3238 // This presumes TSO, of course.
3239 
3240 mov(0, Rscratch);
3241 or3(Rmark, markOopDesc::unlocked_value, Rmark);
3242 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3243 casn(mark_addr.base(), Rmark, Rscratch);
3244 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
3245 cmp(Rscratch, Rmark);
3246 brx(Assembler::notZero, false, Assembler::pn, Recursive);
3247 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
3248 if (counters != NULL) {
3249 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3250 }
3251 ba(done);
3252 delayed()->st_ptr(Rbox, mark_addr);
3253 
3254 bind(Recursive);
3255 // Stack-lock attempt failed - check for recursive stack-lock.
3256 // Tests show that we can remove the recursive case with no impact
3257 // on refworkload 0.83. If we need to reduce the size of the code
3258 // emitted by compiler_lock_object(), the recursive case is a perfect
3259 // candidate.
3260 //
3261 // A more extreme idea is to always inflate on stack-lock recursion.
3262 // This lets us eliminate the recursive checks in compiler_lock_object
3263 // and compiler_unlock_object and the (box->dhw == 0) encoding.
3264 // A brief experiment - requiring changes to synchronizer.cpp and the
3265 // interpreter - showed a performance *increase*.
In the same experiment I eliminated
3266 // the fast-path stack-lock code from the interpreter and always passed
3267 // control to the "slow" operators in synchronizer.cpp.
3268 
3269 // Rscratch contains the fetched obj->mark value from the failed CASN.
3270 #ifdef _LP64
3271 sub(Rscratch, STACK_BIAS, Rscratch);
3272 #endif
3273 sub(Rscratch, SP, Rscratch);
3274 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
3275 andcc(Rscratch, 0xfffff003, Rscratch);
3276 if (counters != NULL) {
3277 // Accounting needs the Rscratch register
3278 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3279 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
3280 ba_short(done);
3281 } else {
3282 ba(done);
3283 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
3284 }
3285 
3286 bind (IsInflated);
3287 if (EmitSync & 64) {
3288 // If m->owner != null goto IsLocked
3289 // Test-and-CAS vs CAS
3290 // Pessimistic form avoids futile (doomed) CAS attempts
3291 // The optimistic form avoids RTS->RTO cache line upgrades.
3292 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3293 andcc(Rscratch, Rscratch, G0);
3294 brx(Assembler::notZero, false, Assembler::pn, done);
3295 delayed()->nop();
3296 // m->owner == null : it's unlocked.
3297 }
3298 
3299 // Try to CAS m->owner from null to Self
3300 // Invariant: if we acquire the lock then _recursions should be 0.
3301 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3302 mov(G2_thread, Rscratch);
3303 casn(Rmark, G0, Rscratch);
3304 cmp(Rscratch, G0);
3305 // ST box->displaced_header = NonZero.
3306 // Any non-zero value suffices:
3307 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
3308 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
3309 // Intentional fall-through into done
3310 }
3311 
3312 bind (done);
3313 }
3314 
3315 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
3316 Register Rbox, Register Rscratch,
3317 bool try_bias) {
3318 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
3319 
3320 Label done ;
3321 
3322 if (EmitSync & 4) {
3323 cmp(SP, G0);
3324 return ;
3325 }
3326 
3327 if (EmitSync & 8) {
3328 if (try_bias) {
3329 biased_locking_exit(mark_addr, Rscratch, done);
3330 }
3331 
3332 // Test first if it is a fast recursive unlock
3333 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
3334 br_null_short(Rmark, Assembler::pt, done);
3335 
3336 // Check if it is still a lightweight lock; this is true if we see
3337 // the stack address of the basicLock in the markOop of the object
3338 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
3339 casx_under_lock(mark_addr.base(), Rbox, Rmark,
3340 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3341 ba(done);
3342 delayed()->cmp(Rbox, Rmark);
3343 bind(done);
3344 return ;
3345 }
3346 
3347 // Beware ... If the aggregate size of the code emitted by CLO and CUO
3348 // is too large, performance rolls abruptly off a cliff.
3349 // This could be related to inlining policies, code cache management, or
3350 // I$ effects.
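// Added sketch (not in the original source): the shape of the unlock fast
// path that follows, restated as pseudo-C. Names are illustrative only.
#if 0 // illustration only, never compiled
dhw = box->displaced_header;
if (dhw == 0) goto done;              // recursive stack-lock: nothing to undo
if ((mark & 2) != 0) {                // inflated: 1-0 exit protocol
  if (monitor->owner != self || monitor->recursions != 0) goto done; // fail
  // release by storing NULL into _owner; EntryList/cxq decide on a successor
} else {
  // stack-locked: CAS the displaced header back into the object's mark word
}
#endif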
3351 Label LStacked ; 3352 3353 if (try_bias) { 3354 // TODO: eliminate redundant LDs of obj->mark 3355 biased_locking_exit(mark_addr, Rscratch, done); 3356 } 3357 3358 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 3359 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 3360 andcc(Rscratch, Rscratch, G0); 3361 brx(Assembler::zero, false, Assembler::pn, done); 3362 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 3363 andcc(Rmark, 2, G0); 3364 brx(Assembler::zero, false, Assembler::pt, LStacked); 3365 delayed()->nop(); 3366 3367 // It's inflated 3368 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 3369 // the ST of 0 into _owner which releases the lock. This prevents loads 3370 // and stores within the critical section from reordering (floating) 3371 // past the store that releases the lock. But TSO is a strong memory model 3372 // and that particular flavor of barrier is a noop, so we can safely elide it. 3373 // Note that we use 1-0 locking by default for the inflated case. We 3374 // close the resultant (and rare) race by having contended threads in 3375 // monitorenter periodically poll _owner. 3376 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); 3377 ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox); 3378 xor3(Rscratch, G2_thread, Rscratch); 3379 orcc(Rbox, Rscratch, Rbox); 3380 brx(Assembler::notZero, false, Assembler::pn, done); 3381 delayed()-> 3382 ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch); 3383 ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox); 3384 orcc(Rbox, Rscratch, G0); 3385 if (EmitSync & 65536) { 3386 Label LSucc ; 3387 brx(Assembler::notZero, false, Assembler::pn, LSucc); 3388 delayed()->nop(); 3389 ba(done); 3390 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3391 3392 bind(LSucc); 3393 st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3394 if (os::is_MP()) { membar (StoreLoad); } 3395 ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch); 3396 andcc(Rscratch, Rscratch, G0); 3397 brx(Assembler::notZero, false, Assembler::pt, done); 3398 delayed()->andcc(G0, G0, G0); 3399 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 3400 mov(G2_thread, Rscratch); 3401 casn(Rmark, G0, Rscratch); 3402 // invert icc.zf and goto done 3403 br_notnull(Rscratch, false, Assembler::pt, done); 3404 delayed()->cmp(G0, G0); 3405 ba(done); 3406 delayed()->cmp(G0, 1); 3407 } else { 3408 brx(Assembler::notZero, false, Assembler::pn, done); 3409 delayed()->nop(); 3410 ba(done); 3411 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); 3412 } 3413 3414 bind (LStacked); 3415 // Consider: we could replace the expensive CAS in the exit 3416 // path with a simple ST of the displaced mark value fetched from 3417 // the on-stack basiclock box. That admits a race where a thread T2 3418 // in the slow lock path -- inflating with monitor M -- could race a 3419 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 3420 // More precisely T1 in the stack-lock unlock path could "stomp" the 3421 // inflated mark value M installed by T2, resulting in an orphan 3422 // object monitor M and T2 becoming stranded. We can remedy that situation 3423 // by having T2 periodically poll the object's mark word using timed wait 3424 // operations.
If T2 discovers that a stomp has occurred it vacates 3425 // the monitor M and wakes any other threads stranded on the now-orphan M. 3426 // In addition the monitor scavenger, which performs deflation, 3427 // would also need to check for orphan monitors and stranded threads. 3428 // 3429 // Finally, inflation is also used when T2 needs to assign a hashCode 3430 // to O and O is stack-locked by T1. The "stomp" race could cause 3431 // an assigned hashCode value to be lost. We can avoid that condition 3432 // and provide the necessary hashCode stability invariants by ensuring 3433 // that hashCode generation is idempotent between copying GCs. 3434 // For example we could compute the hashCode of an object O as 3435 // O's heap address XOR some high quality RNG value that is refreshed 3436 // at GC-time. The monitor scavenger would install the hashCode 3437 // found in any orphan monitors. Again, the mechanism admits a 3438 // lost-update "stomp" WAW race but detects and recovers as needed. 3439 // 3440 // A prototype implementation showed excellent results, although 3441 // the scavenger and timeout code was rather involved. 3442 3443 casn(mark_addr.base(), Rbox, Rscratch); 3444 cmp(Rbox, Rscratch); 3445 // Intentional fall through into done ... 3446 3447 bind(done); 3448 } 3449 3450 3451 3452 void MacroAssembler::print_CPU_state() { 3453 // %%%%% need to implement this 3454 } 3455 3456 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3457 // %%%%% need to implement this 3458 } 3459 3460 void MacroAssembler::push_IU_state() { 3461 // %%%%% need to implement this 3462 } 3463 3464 3465 void MacroAssembler::pop_IU_state() { 3466 // %%%%% need to implement this 3467 } 3468 3469 3470 void MacroAssembler::push_FPU_state() { 3471 // %%%%% need to implement this 3472 } 3473 3474 3475 void MacroAssembler::pop_FPU_state() { 3476 // %%%%% need to implement this 3477 } 3478 3479 3480 void MacroAssembler::push_CPU_state() { 3481 // %%%%% need to implement this 3482 } 3483 3484 3485 void MacroAssembler::pop_CPU_state() { 3486 // %%%%% need to implement this 3487 } 3488 3489 3490 3491 void MacroAssembler::verify_tlab() { 3492 #ifdef ASSERT 3493 if (UseTLAB && VerifyOops) { 3494 Label next, next2, ok; 3495 Register t1 = L0; 3496 Register t2 = L1; 3497 Register t3 = L2; 3498 3499 save_frame(0); 3500 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3501 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3502 or3(t1, t2, t3); 3503 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3504 STOP("assert(top >= start)"); 3505 should_not_reach_here(); 3506 3507 bind(next); 3508 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3509 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3510 or3(t3, t2, t3); 3511 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3512 STOP("assert(top <= end)"); 3513 should_not_reach_here(); 3514 3515 bind(next2); 3516 and3(t3, MinObjAlignmentInBytesMask, t3); 3517 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3518 STOP("assert(aligned)"); 3519 should_not_reach_here(); 3520 3521 bind(ok); 3522 restore(); 3523 } 3524 #endif 3525 } 3526 3527 3528 void MacroAssembler::eden_allocate( 3529 Register obj, // result: pointer to object after successful allocation 3530 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3531 int con_size_in_bytes, // object size in bytes if known at compile time 3532 Register t1, // temp
register 3533 Register t2, // temp register 3534 Label& slow_case // continuation point if fast allocation fails 3535 ){ 3536 // make sure arguments make sense 3537 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3538 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3539 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3540 3541 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 3542 // No allocation in the shared eden. 3543 ba_short(slow_case); 3544 } else { 3545 // get eden boundaries 3546 // note: we need both top & top_addr! 3547 const Register top_addr = t1; 3548 const Register end = t2; 3549 3550 CollectedHeap* ch = Universe::heap(); 3551 set((intx)ch->top_addr(), top_addr); 3552 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3553 ld_ptr(top_addr, delta, end); 3554 ld_ptr(top_addr, 0, obj); 3555 3556 // try to allocate 3557 Label retry; 3558 bind(retry); 3559 #ifdef ASSERT 3560 // make sure eden top is properly aligned 3561 { 3562 Label L; 3563 btst(MinObjAlignmentInBytesMask, obj); 3564 br(Assembler::zero, false, Assembler::pt, L); 3565 delayed()->nop(); 3566 STOP("eden top is not properly aligned"); 3567 bind(L); 3568 } 3569 #endif // ASSERT 3570 const Register free = end; 3571 sub(end, obj, free); // compute amount of free space 3572 if (var_size_in_bytes->is_valid()) { 3573 // size is unknown at compile time 3574 cmp(free, var_size_in_bytes); 3575 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 3576 delayed()->add(obj, var_size_in_bytes, end); 3577 } else { 3578 // size is known at compile time 3579 cmp(free, con_size_in_bytes); 3580 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 3581 delayed()->add(obj, con_size_in_bytes, end); 3582 } 3583 // Compare obj with the value at top_addr; if still equal, swap the value of 3584 // end with the value at top_addr. If not equal, read the value at top_addr 3585 // into end.
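// In C-like pseudocode the retry loop below amounts to (an illustrative
// sketch, not the emitted code; "eden_end" is the value loaded from
// ch->end_addr() above):
//
//   do {
//     obj = *top_addr;                              // current allocation top
//     if (eden_end - obj < size) goto slow_case;    // not enough contiguous space
//     end = obj + size;                             // desired new top
//   } while (cas(top_addr, obj, end) != obj);       // lost the race: reload and retry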
3586 casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 3587 // if someone beat us on the allocation, try again, otherwise continue 3588 cmp(obj, end); 3589 brx(Assembler::notEqual, false, Assembler::pn, retry); 3590 delayed()->mov(end, obj); // nop if successful since obj == end 3591 3592 #ifdef ASSERT 3593 // make sure eden top is properly aligned 3594 { 3595 Label L; 3596 const Register top_addr = t1; 3597 3598 set((intx)ch->top_addr(), top_addr); 3599 ld_ptr(top_addr, 0, top_addr); 3600 btst(MinObjAlignmentInBytesMask, top_addr); 3601 br(Assembler::zero, false, Assembler::pt, L); 3602 delayed()->nop(); 3603 STOP("eden top is not properly aligned"); 3604 bind(L); 3605 } 3606 #endif // ASSERT 3607 } 3608 } 3609 3610 3611 void MacroAssembler::tlab_allocate( 3612 Register obj, // result: pointer to object after successful allocation 3613 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3614 int con_size_in_bytes, // object size in bytes if known at compile time 3615 Register t1, // temp register 3616 Label& slow_case // continuation point if fast allocation fails 3617 ){ 3618 // make sure arguments make sense 3619 assert_different_registers(obj, var_size_in_bytes, t1); 3620 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3621 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3622 3623 const Register free = t1; 3624 3625 verify_tlab(); 3626 3627 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3628 3629 // calculate amount of free space 3630 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3631 sub(free, obj, free); 3632 3633 Label done; 3634 if (var_size_in_bytes == noreg) { 3635 cmp(free, con_size_in_bytes); 3636 } else { 3637 cmp(free, var_size_in_bytes); 3638 } 3639 br(Assembler::less, false, Assembler::pn, slow_case); 3640 // calculate the new top pointer 3641 if (var_size_in_bytes == noreg) { 3642 delayed()->add(obj, con_size_in_bytes, free); 3643 } else { 3644 delayed()->add(obj, var_size_in_bytes, free); 3645 } 3646 3647 bind(done); 3648 3649 #ifdef ASSERT 3650 // make sure new free pointer is properly aligned 3651 { 3652 Label L; 3653 btst(MinObjAlignmentInBytesMask, free); 3654 br(Assembler::zero, false, Assembler::pt, L); 3655 delayed()->nop(); 3656 STOP("updated TLAB free is not properly aligned"); 3657 bind(L); 3658 } 3659 #endif // ASSERT 3660 3661 // update the tlab top pointer 3662 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3663 verify_tlab(); 3664 } 3665 3666 3667 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { 3668 Register top = O0; 3669 Register t1 = G1; 3670 Register t2 = G3; 3671 Register t3 = O1; 3672 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); 3673 Label do_refill, discard_tlab; 3674 3675 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 3676 // No allocation in the shared eden.
3677 ba_short(slow_case); 3678 } 3679 3680 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); 3681 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); 3682 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); 3683 3684 // calculate amount of free space 3685 sub(t1, top, t1); 3686 srl_ptr(t1, LogHeapWordSize, t1); 3687 3688 // Retain tlab and allocate object in shared space if 3689 // the amount free in the tlab is too large to discard. 3690 cmp(t1, t2); 3691 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab); 3692 3693 // increment waste limit to prevent getting stuck on this slow path 3694 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2); 3695 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); 3696 if (TLABStats) { 3697 // increment number of slow_allocations 3698 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); 3699 add(t2, 1, t2); 3700 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); 3701 } 3702 ba_short(try_eden); 3703 3704 bind(discard_tlab); 3705 if (TLABStats) { 3706 // increment number of refills 3707 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); 3708 add(t2, 1, t2); 3709 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); 3710 // accumulate wastage 3711 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); 3712 add(t2, t1, t2); 3713 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); 3714 } 3715 3716 // if tlab is currently allocated (top or end != null) then 3717 // fill [top, end + alignment_reserve) with array object 3718 br_null_short(top, Assembler::pn, do_refill); 3719 3720 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); 3721 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word 3722 // set klass to intArrayKlass 3723 sub(t1, typeArrayOopDesc::header_size(T_INT), t1); 3724 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); 3725 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); 3726 st(t1, top, arrayOopDesc::length_offset_in_bytes()); 3727 set((intptr_t)Universe::intArrayKlassObj_addr(), t2); 3728 ld_ptr(t2, 0, t2); 3729 // store klass last. concurrent GCs assume the length is valid if 3730 // the klass field is not null.
3731 store_klass(t2, top); 3732 verify_oop(top); 3733 3734 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1); 3735 sub(top, t1, t1); // size of tlab's allocated portion 3736 incr_allocated_bytes(t1, t2, t3); 3737 3738 // refill the tlab with an eden allocation 3739 bind(do_refill); 3740 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); 3741 sll_ptr(t1, LogHeapWordSize, t1); 3742 // allocate new tlab, address returned in top 3743 eden_allocate(top, t1, 0, t2, t3, slow_case); 3744 3745 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); 3746 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3747 #ifdef ASSERT 3748 // check that tlab_size (t1) is still valid 3749 { 3750 Label ok; 3751 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); 3752 sll_ptr(t2, LogHeapWordSize, t2); 3753 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); 3754 STOP("assert(t1 == tlab_size)"); 3755 should_not_reach_here(); 3756 3757 bind(ok); 3758 } 3759 #endif // ASSERT 3760 add(top, t1, top); // t1 is tlab_size 3761 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); 3762 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); 3763 verify_tlab(); 3764 ba_short(retry); 3765 } 3766 3767 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3768 Register t1, Register t2) { 3769 // Bump total bytes allocated by this thread 3770 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3771 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3772 // v8 support has gone the way of the dodo 3773 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3774 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3775 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3776 } 3777 3778 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3779 switch (cond) { 3780 // Note some conditions are synonyms for others 3781 case Assembler::never: return Assembler::always; 3782 case Assembler::zero: return Assembler::notZero; 3783 case Assembler::lessEqual: return Assembler::greater; 3784 case Assembler::less: return Assembler::greaterEqual; 3785 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3786 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3787 case Assembler::negative: return Assembler::positive; 3788 case Assembler::overflowSet: return Assembler::overflowClear; 3789 case Assembler::always: return Assembler::never; 3790 case Assembler::notZero: return Assembler::zero; 3791 case Assembler::greater: return Assembler::lessEqual; 3792 case Assembler::greaterEqual: return Assembler::less; 3793 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3794 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3795 case Assembler::positive: return Assembler::negative; 3796 case Assembler::overflowClear: return Assembler::overflowSet; 3797 } 3798 3799 ShouldNotReachHere(); return Assembler::overflowClear; 3800 } 3801 3802 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3803 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3804 Condition negated_cond = negate_condition(cond); 3805 Label L; 3806 brx(negated_cond, false, Assembler::pt, L); 3807 delayed()->nop(); 3808 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3809 bind(L); 3810 } 3811 3812 void MacroAssembler::inc_counter(address 
counter_addr, Register Rtmp1, Register Rtmp2) { 3813 AddressLiteral addrlit(counter_addr); 3814 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 3815 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 3816 ld(addr, Rtmp2); 3817 inc(Rtmp2); 3818 st(Rtmp2, addr); 3819 } 3820 3821 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3822 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3823 } 3824 3825 SkipIfEqual::SkipIfEqual( 3826 MacroAssembler* masm, Register temp, const bool* flag_addr, 3827 Assembler::Condition condition) { 3828 _masm = masm; 3829 AddressLiteral flag(flag_addr); 3830 _masm->sethi(flag, temp); 3831 _masm->ldub(temp, flag.low10(), temp); 3832 _masm->tst(temp); 3833 _masm->br(condition, false, Assembler::pt, _label); 3834 _masm->delayed()->nop(); 3835 } 3836 3837 SkipIfEqual::~SkipIfEqual() { 3838 _masm->bind(_label); 3839 } 3840 3841 3842 // Writes to successive stack pages until the given offset is reached, to check 3843 // for stack overflow + shadow pages. This clobbers tsp and scratch. 3844 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3845 Register Rscratch) { 3846 // Work on a copy of the stack pointer in the temp stack pointer 3847 mov(SP, Rtsp); 3848 3849 // Bang stack for total size given plus stack shadow page size. 3850 // Bang one page at a time because a large size can overflow yellow and 3851 // red zones (the bang will fail but stack overflow handling can't tell that 3852 // it was a stack overflow bang vs a regular segv). 3853 int offset = os::vm_page_size(); 3854 Register Roffset = Rscratch; 3855 3856 Label loop; 3857 bind(loop); 3858 set((-offset)+STACK_BIAS, Rscratch); 3859 st(G0, Rtsp, Rscratch); 3860 set(offset, Roffset); 3861 sub(Rsize, Roffset, Rsize); 3862 cmp(Rsize, G0); 3863 br(Assembler::greater, false, Assembler::pn, loop); 3864 delayed()->sub(Rtsp, Roffset, Rtsp); 3865 3866 // Bang down shadow pages too. 3867 // The -1 because we already subtracted 1 page. 3868 for (int i = 0; i < StackShadowPages-1; i++) { 3869 set((-i*offset)+STACK_BIAS, Rscratch); 3870 st(G0, Rtsp, Rscratch); 3871 } 3872 } 3873 3874 /////////////////////////////////////////////////////////////////////////////////// 3875 #if INCLUDE_ALL_GCS 3876 3877 static address satb_log_enqueue_with_frame = NULL; 3878 static u_char* satb_log_enqueue_with_frame_end = NULL; 3879 3880 static address satb_log_enqueue_frameless = NULL; 3881 static u_char* satb_log_enqueue_frameless_end = NULL; 3882 3883 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? 3884 3885 static void generate_satb_log_enqueue(bool with_frame) { 3886 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 3887 CodeBuffer buf(bb); 3888 MacroAssembler masm(&buf); 3889 3890 #define __ masm. 3891 3892 address start = __ pc(); 3893 Register pre_val; 3894 3895 Label refill, restart; 3896 if (with_frame) { 3897 __ save_frame(0); 3898 pre_val = I0; // Was O0 before the save.
} else { 3900 pre_val = O0; 3901 } 3902 3903 int satb_q_index_byte_offset = 3904 in_bytes(JavaThread::satb_mark_queue_offset() + 3905 PtrQueue::byte_offset_of_index()); 3906 3907 int satb_q_buf_byte_offset = 3908 in_bytes(JavaThread::satb_mark_queue_offset() + 3909 PtrQueue::byte_offset_of_buf()); 3910 3911 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) && 3912 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t), 3913 "check sizes in assembly below"); 3914 3915 __ bind(restart); 3916 3917 // Load the index into the SATB buffer. PtrQueue::_index is a size_t 3918 // so ld_ptr is appropriate. 3919 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); 3920 3921 // index == 0? 3922 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3923 3924 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); 3925 __ sub(L0, oopSize, L0); 3926 3927 __ st_ptr(pre_val, L1, L0); // [_buf + index] := pre_val 3928 if (!with_frame) { 3929 // Use return-from-leaf 3930 __ retl(); 3931 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3932 } else { 3933 // Not delayed. 3934 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3935 } 3936 if (with_frame) { 3937 __ ret(); 3938 __ delayed()->restore(); 3939 } 3940 __ bind(refill); 3941 3942 address handle_zero = 3943 CAST_FROM_FN_PTR(address, 3944 &SATBMarkQueueSet::handle_zero_index_for_thread); 3945 // This should be rare enough that we can afford to save all the 3946 // scratch registers that the calling context might be using. 3947 __ mov(G1_scratch, L0); 3948 __ mov(G3_scratch, L1); 3949 __ mov(G4, L2); 3950 // We need the value of O0 above (for the write into the buffer), so we 3951 // save and restore it. 3952 __ mov(O0, L3); 3953 // Since the call will overwrite O7, we save and restore that, as well.
__ mov(O7, L4); 3955 __ call_VM_leaf(L5, handle_zero, G2_thread); 3956 __ mov(L0, G1_scratch); 3957 __ mov(L1, G3_scratch); 3958 __ mov(L2, G4); 3959 __ mov(L3, O0); 3960 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3961 __ delayed()->mov(L4, O7); 3962 3963 if (with_frame) { 3964 satb_log_enqueue_with_frame = start; 3965 satb_log_enqueue_with_frame_end = __ pc(); 3966 } else { 3967 satb_log_enqueue_frameless = start; 3968 satb_log_enqueue_frameless_end = __ pc(); 3969 } 3970 3971 #undef __ 3972 } 3973 3974 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) { 3975 if (with_frame) { 3976 if (satb_log_enqueue_with_frame == 0) { 3977 generate_satb_log_enqueue(with_frame); 3978 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 3979 if (G1SATBPrintStubs) { 3980 tty->print_cr("Generated with-frame satb enqueue:"); 3981 Disassembler::decode((u_char*)satb_log_enqueue_with_frame, 3982 satb_log_enqueue_with_frame_end, 3983 tty); 3984 } 3985 } 3986 } else { 3987 if (satb_log_enqueue_frameless == 0) { 3988 generate_satb_log_enqueue(with_frame); 3989 assert(satb_log_enqueue_frameless != 0, "postcondition."); 3990 if (G1SATBPrintStubs) { 3991 tty->print_cr("Generated frameless satb enqueue:"); 3992 Disassembler::decode((u_char*)satb_log_enqueue_frameless, 3993 satb_log_enqueue_frameless_end, 3994 tty); 3995 } 3996 } 3997 } 3998 } 3999 4000 void MacroAssembler::g1_write_barrier_pre(Register obj, 4001 Register index, 4002 int offset, 4003 Register pre_val, 4004 Register tmp, 4005 bool preserve_o_regs) { 4006 Label filtered; 4007 4008 if (obj == noreg) { 4009 // We are not loading the previous value so make 4010 // sure that we don't trash the value in pre_val 4011 // with the code below. 4012 assert_different_registers(pre_val, tmp); 4013 } else { 4014 // We will be loading the previous value 4015 // in this code so... 4016 assert(offset == 0 || index == noreg, "choose one"); 4017 assert(pre_val == noreg, "check this code"); 4018 } 4019 4020 // Load the marking-active flag. 4021 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { 4022 ld(G2, 4023 in_bytes(JavaThread::satb_mark_queue_offset() + 4024 PtrQueue::byte_offset_of_active()), 4025 tmp); 4026 } else { 4027 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, 4028 "Assumption"); 4029 ldsb(G2, 4030 in_bytes(JavaThread::satb_mark_queue_offset() + 4031 PtrQueue::byte_offset_of_active()), 4032 tmp); 4033 } 4034 4035 // Is marking active? 4036 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 4037 4038 // Do we need to load the previous value? 4039 if (obj != noreg) { 4040 // Load the previous value... 4041 if (index == noreg) { 4042 if (Assembler::is_simm13(offset)) { 4043 load_heap_oop(obj, offset, tmp); 4044 } else { 4045 set(offset, tmp); 4046 load_heap_oop(obj, tmp, tmp); 4047 } 4048 } else { 4049 load_heap_oop(obj, index, tmp); 4050 } 4051 // Previous value has been loaded into tmp 4052 pre_val = tmp; 4053 } 4054 4055 assert(pre_val != noreg, "must have a real register"); 4056 4057 // Is the previous value null? 4058 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered); 4059 4060 // OK, it's not filtered, so we'll need to call enqueue. In the normal 4061 // case, pre_val will be a scratch G-reg, but there are some cases in 4062 // which it's an O-reg. In the former case, do a normal call. In the 4063 // latter, do a save here and call the frameless version.
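// (SPARC register-window reminder: after a save, the caller's O-registers are
// visible to the callee as I-registers, so pre_val->after_save() below names
// the same value from inside the new frame, e.g. O0 becomes I0.)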
4064 4065 guarantee(pre_val->is_global() || pre_val->is_out(), 4066 "Or we need to think harder."); 4067 4068 if (pre_val->is_global() && !preserve_o_regs) { 4069 generate_satb_log_enqueue_if_necessary(true); // with frame 4070 4071 call(satb_log_enqueue_with_frame); 4072 delayed()->mov(pre_val, O0); 4073 } else { 4074 generate_satb_log_enqueue_if_necessary(false); // frameless 4075 4076 save_frame(0); 4077 call(satb_log_enqueue_frameless); 4078 delayed()->mov(pre_val->after_save(), O0); 4079 restore(); 4080 } 4081 4082 bind(filtered); 4083 } 4084 4085 static address dirty_card_log_enqueue = 0; 4086 static u_char* dirty_card_log_enqueue_end = 0; 4087 4088 // This gets to assume that O0 contains the object address. 4089 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 4090 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 4091 CodeBuffer buf(bb); 4092 MacroAssembler masm(&buf); 4093 #define __ masm. 4094 address start = __ pc(); 4095 4096 Label not_already_dirty, restart, refill; 4097 4098 #ifdef _LP64 4099 __ srlx(O0, CardTableModRefBS::card_shift, O0); 4100 #else 4101 __ srl(O0, CardTableModRefBS::card_shift, O0); 4102 #endif 4103 AddressLiteral addrlit(byte_map_base); 4104 __ set(addrlit, O1); // O1 := <card table base> 4105 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 4106 4107 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 4108 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 4109 4110 // We didn't take the branch, so we're already dirty: return. 4111 // Use return-from-leaf 4112 __ retl(); 4113 __ delayed()->nop(); 4114 4115 // Not dirty. 4116 __ bind(not_already_dirty); 4117 4118 // Get O0 + O1 into a reg by itself 4119 __ add(O0, O1, O3); 4120 4121 // First, dirty it. 4122 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty). 4123 4124 int dirty_card_q_index_byte_offset = 4125 in_bytes(JavaThread::dirty_card_queue_offset() + 4126 PtrQueue::byte_offset_of_index()); 4127 int dirty_card_q_buf_byte_offset = 4128 in_bytes(JavaThread::dirty_card_queue_offset() + 4129 PtrQueue::byte_offset_of_buf()); 4130 __ bind(restart); 4131 4132 // Load the index into the update buffer. PtrQueue::_index is 4133 // a size_t so ld_ptr is appropriate here. 4134 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0); 4135 4136 // index == 0? 4137 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 4138 4139 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1); 4140 __ sub(L0, oopSize, L0); 4141 4142 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 4143 // Use return-from-leaf 4144 __ retl(); 4145 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset); 4146 4147 __ bind(refill); 4148 address handle_zero = 4149 CAST_FROM_FN_PTR(address, 4150 &DirtyCardQueueSet::handle_zero_index_for_thread); 4151 // This should be rare enough that we can afford to save all the 4152 // scratch registers that the calling context might be using. 4153 __ mov(G1_scratch, L3); 4154 __ mov(G3_scratch, L5); 4155 // We need the value of O3 above (for the write into the buffer), so we 4156 // save and restore it. 4157 __ mov(O3, L6); 4158 // Since the call will overwrite O7, we save and restore that, as well.
4159 __ mov(O7, L4); 4160 4161 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread); 4162 __ mov(L3, G1_scratch); 4163 __ mov(L5, G3_scratch); 4164 __ mov(L6, O3); 4165 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 4166 __ delayed()->mov(L4, O7); 4167 4168 dirty_card_log_enqueue = start; 4169 dirty_card_log_enqueue_end = __ pc(); 4170 // XXX Should have a guarantee here about not going off the end! 4171 // Does it already do so? Do an experiment... 4172 4173 #undef __ 4174 4175 } 4176 4177 static inline void 4178 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) { 4179 if (dirty_card_log_enqueue == 0) { 4180 generate_dirty_card_log_enqueue(byte_map_base); 4181 assert(dirty_card_log_enqueue != 0, "postcondition."); 4182 if (G1SATBPrintStubs) { 4183 tty->print_cr("Generated dirty_card enqueue:"); 4184 Disassembler::decode((u_char*)dirty_card_log_enqueue, 4185 dirty_card_log_enqueue_end, 4186 tty); 4187 } 4188 } 4189 } 4190 4191 4192 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 4193 4194 Label filtered; 4195 MacroAssembler* post_filter_masm = this; 4196 4197 if (new_val == G0) return; 4198 4199 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 4200 assert(bs->kind() == BarrierSet::G1SATBCT || 4201 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 4202 4203 if (G1RSBarrierRegionFilter) { 4204 xor3(store_addr, new_val, tmp); 4205 #ifdef _LP64 4206 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 4207 #else 4208 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp); 4209 #endif 4210 4211 // XXX Should I predict this taken or not? Does it matter? 4212 cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 4213 } 4214 4215 // If the "store_addr" register is an "in" or "local" register, move it to 4216 // a scratch reg so we can pass it as an argument. 4217 bool use_scr = !(store_addr->is_global() || store_addr->is_out()); 4218 // Pick a scratch register different from "tmp". 4219 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch); 4220 // Make sure we use up the delay slot! 4221 if (use_scr) { 4222 post_filter_masm->mov(store_addr, scr); 4223 } else { 4224 post_filter_masm->nop(); 4225 } 4226 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base); 4227 save_frame(0); 4228 call(dirty_card_log_enqueue); 4229 if (use_scr) { 4230 delayed()->mov(scr, O0); 4231 } else { 4232 delayed()->mov(store_addr->after_save(), O0); 4233 } 4234 restore(); 4235 4236 bind(filtered); 4237 } 4238 4239 #endif // INCLUDE_ALL_GCS 4240 /////////////////////////////////////////////////////////////////////////////////// 4241 4242 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 4243 // If we're writing constant NULL, we can skip the write barrier. 4244 if (new_val == G0) return; 4245 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); 4246 assert(bs->kind() == BarrierSet::CardTableModRef || 4247 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 4248 card_table_write(bs->byte_map_base, tmp, store_addr); 4249 } 4250 4251 void MacroAssembler::load_klass(Register src_oop, Register klass) { 4252 // The number of bytes in this code is used by 4253 // MachCallDynamicJavaNode::ret_addr_offset() 4254 // if this changes, change that. 
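// The compressed-pointer helpers below all share the same arithmetic
// (a sketch; base and shift come from Universe::narrow_oop_base()/_shift()
// and their klass counterparts):
//
//   encode: narrow = (ptr - base) >> shift;     // base == NULL: shift only
//   decode: ptr    = (narrow << shift) + base;  // NULL (0) must map back to NULL
//
// The *_not_null variants may omit the null check, which is what makes the
// base != NULL paths cheaper there.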
4255 if (UseCompressedKlassPointers) { 4256 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 4257 decode_klass_not_null(klass); 4258 } else { 4259 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 4260 } 4261 } 4262 4263 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 4264 if (UseCompressedKlassPointers) { 4265 assert(dst_oop != klass, "not enough registers"); 4266 encode_klass_not_null(klass); 4267 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 4268 } else { 4269 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 4270 } 4271 } 4272 4273 void MacroAssembler::store_klass_gap(Register s, Register d) { 4274 if (UseCompressedKlassPointers) { 4275 assert(s != d, "not enough registers"); 4276 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 4277 } 4278 } 4279 4280 void MacroAssembler::load_heap_oop(const Address& s, Register d) { 4281 if (UseCompressedOops) { 4282 lduw(s, d); 4283 decode_heap_oop(d); 4284 } else { 4285 ld_ptr(s, d); 4286 } 4287 } 4288 4289 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) { 4290 if (UseCompressedOops) { 4291 lduw(s1, s2, d); 4292 decode_heap_oop(d, d); 4293 } else { 4294 ld_ptr(s1, s2, d); 4295 } 4296 } 4297 4298 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) { 4299 if (UseCompressedOops) { 4300 lduw(s1, simm13a, d); 4301 decode_heap_oop(d, d); 4302 } else { 4303 ld_ptr(s1, simm13a, d); 4304 } 4305 } 4306 4307 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) { 4308 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d); 4309 else load_heap_oop(s1, s2.as_register(), d); 4310 } 4311 4312 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) { 4313 if (UseCompressedOops) { 4314 assert(s1 != d && s2 != d, "not enough registers"); 4315 encode_heap_oop(d); 4316 st(d, s1, s2); 4317 } else { 4318 st_ptr(d, s1, s2); 4319 } 4320 } 4321 4322 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) { 4323 if (UseCompressedOops) { 4324 assert(s1 != d, "not enough registers"); 4325 encode_heap_oop(d); 4326 st(d, s1, simm13a); 4327 } else { 4328 st_ptr(d, s1, simm13a); 4329 } 4330 } 4331 4332 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) { 4333 if (UseCompressedOops) { 4334 assert(a.base() != d, "not enough registers"); 4335 encode_heap_oop(d); 4336 st(d, a, offset); 4337 } else { 4338 st_ptr(d, a, offset); 4339 } 4340 } 4341 4342 4343 void MacroAssembler::encode_heap_oop(Register src, Register dst) { 4344 assert (UseCompressedOops, "must be compressed"); 4345 assert (Universe::heap() != NULL, "java heap should be initialized"); 4346 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4347 verify_oop(src); 4348 if (Universe::narrow_oop_base() == NULL) { 4349 srlx(src, LogMinObjAlignmentInBytes, dst); 4350 return; 4351 } 4352 Label done; 4353 if (src == dst) { 4354 // optimize for frequent case src == dst 4355 bpr(rc_nz, true, Assembler::pt, src, done); 4356 delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken 4357 bind(done); 4358 srlx(src, LogMinObjAlignmentInBytes, dst); 4359 } else { 4360 bpr(rc_z, false, Assembler::pn, src, done); 4361 delayed() -> mov(G0, dst); 4362 // could be moved before the branch, annulling the delay slot, 4363 // but that may add some unneeded work decoding null 4364 sub(src, G6_heapbase, dst); 4365 srlx(dst, LogMinObjAlignmentInBytes, dst); 4366 bind(done); 4367 } 4368 } 4369 4370 4371 void
MacroAssembler::encode_heap_oop_not_null(Register r) { 4372 assert (UseCompressedOops, "must be compressed"); 4373 assert (Universe::heap() != NULL, "java heap should be initialized"); 4374 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4375 verify_oop(r); 4376 if (Universe::narrow_oop_base() != NULL) 4377 sub(r, G6_heapbase, r); 4378 srlx(r, LogMinObjAlignmentInBytes, r); 4379 } 4380 4381 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { 4382 assert (UseCompressedOops, "must be compressed"); 4383 assert (Universe::heap() != NULL, "java heap should be initialized"); 4384 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4385 verify_oop(src); 4386 if (Universe::narrow_oop_base() == NULL) { 4387 srlx(src, LogMinObjAlignmentInBytes, dst); 4388 } else { 4389 sub(src, G6_heapbase, dst); 4390 srlx(dst, LogMinObjAlignmentInBytes, dst); 4391 } 4392 } 4393 4394 // Same algorithm as oops.inline.hpp decode_heap_oop. 4395 void MacroAssembler::decode_heap_oop(Register src, Register dst) { 4396 assert (UseCompressedOops, "must be compressed"); 4397 assert (Universe::heap() != NULL, "java heap should be initialized"); 4398 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4399 sllx(src, LogMinObjAlignmentInBytes, dst); 4400 if (Universe::narrow_oop_base() != NULL) { 4401 Label done; 4402 bpr(rc_nz, true, Assembler::pt, dst, done); 4403 delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken 4404 bind(done); 4405 } 4406 verify_oop(dst); 4407 } 4408 4409 void MacroAssembler::decode_heap_oop_not_null(Register r) { 4410 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4411 // pd_code_size_limit. 4412 // Also do not verify_oop as this is called by verify_oop. 4413 assert (UseCompressedOops, "must be compressed"); 4414 assert (Universe::heap() != NULL, "java heap should be initialized"); 4415 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4416 sllx(r, LogMinObjAlignmentInBytes, r); 4417 if (Universe::narrow_oop_base() != NULL) 4418 add(r, G6_heapbase, r); 4419 } 4420 4421 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { 4422 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4423 // pd_code_size_limit. 4424 // Also do not verify_oop as this is called by verify_oop.
4425 assert (UseCompressedOops, "must be compressed"); 4426 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 4427 sllx(src, LogMinObjAlignmentInBytes, dst); 4428 if (Universe::narrow_oop_base() != NULL) 4429 add(dst, G6_heapbase, dst); 4430 } 4431 4432 void MacroAssembler::encode_klass_not_null(Register r) { 4433 assert(Metaspace::is_initialized(), "metaspace should be initialized"); 4434 assert (UseCompressedKlassPointers, "must be compressed"); 4435 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4436 if (Universe::narrow_klass_base() != NULL) 4437 sub(r, G6_heapbase, r); 4438 srlx(r, LogKlassAlignmentInBytes, r); 4439 } 4440 4441 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 4442 assert(Metaspace::is_initialized(), "metaspace should be initialized"); 4443 assert (UseCompressedKlassPointers, "must be compressed"); 4444 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4445 if (Universe::narrow_klass_base() == NULL) { 4446 srlx(src, LogKlassAlignmentInBytes, dst); 4447 } else { 4448 sub(src, G6_heapbase, dst); 4449 srlx(dst, LogKlassAlignmentInBytes, dst); 4450 } 4451 } 4452 4453 void MacroAssembler::decode_klass_not_null(Register r) { 4454 assert(Metaspace::is_initialized(), "metaspace should be initialized"); 4455 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4456 // pd_code_size_limit. 4457 assert (UseCompressedKlassPointers, "must be compressed"); 4458 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4459 sllx(r, LogKlassAlignmentInBytes, r); 4460 if (Universe::narrow_klass_base() != NULL) 4461 add(r, G6_heapbase, r); 4462 } 4463 4464 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 4465 assert(Metaspace::is_initialized(), "metaspace should be initialized"); 4466 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4467 // pd_code_size_limit. 4468 assert (UseCompressedKlassPointers, "must be compressed"); 4469 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 4470 sllx(src, LogKlassAlignmentInBytes, dst); 4471 if (Universe::narrow_klass_base() != NULL) 4472 add(dst, G6_heapbase, dst); 4473 } 4474 4475 void MacroAssembler::reinit_heapbase() { 4476 if (UseCompressedOops || UseCompressedKlassPointers) { 4477 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 4478 load_ptr_contents(base, G6_heapbase); 4479 } 4480 } 4481 4482 // Compare char[] arrays aligned to 4 bytes. 4483 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2, 4484 Register limit, Register result, 4485 Register chr1, Register chr2, Label& Ldone) { 4486 Label Lvector, Lloop; 4487 assert(chr1 == result, "should be the same"); 4488 4489 // Note: limit contains number of bytes (2*char_elements) != 0. 4490 andcc(limit, 0x2, chr1); // trailing character ? 4491 br(Assembler::zero, false, Assembler::pt, Lvector); 4492 delayed()->nop(); 4493 4494 // compare the trailing char 4495 sub(limit, sizeof(jchar), limit); 4496 lduh(ary1, limit, chr1); 4497 lduh(ary2, limit, chr2); 4498 cmp(chr1, chr2); 4499 br(Assembler::notEqual, true, Assembler::pt, Ldone); 4500 delayed()->mov(G0, result); // not equal 4501 4502 // only one char ? 
cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn); 4504 delayed()->add(G0, 1, result); // zero-length arrays are equal 4505 4506 // word by word compare, don't need alignment check 4507 bind(Lvector); 4508 // Shift ary1 and ary2 to the end of the arrays, negate limit 4509 add(ary1, limit, ary1); 4510 add(ary2, limit, ary2); 4511 neg(limit, limit); 4512 4513 lduw(ary1, limit, chr1); 4514 bind(Lloop); 4515 lduw(ary2, limit, chr2); 4516 cmp(chr1, chr2); 4517 br(Assembler::notEqual, true, Assembler::pt, Ldone); 4518 delayed()->mov(G0, result); // not equal 4519 inccc(limit, 2*sizeof(jchar)); 4520 // annul LDUW if branch is not taken to prevent access past end of array 4521 br(Assembler::notZero, true, Assembler::pt, Lloop); 4522 delayed()->lduw(ary1, limit, chr1); // hoisted 4523 4524 // Caller should set it: 4525 // add(G0, 1, result); // equals 4526 } 4527 4528 // Use BIS for zeroing (count is in bytes). 4529 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) { 4530 assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing"); 4531 Register end = count; 4532 int cache_line_size = VM_Version::prefetch_data_size(); 4533 // Minimum count when BIS zeroing can be used since 4534 // it needs membar which is expensive. 4535 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit); 4536 4537 Label small_loop; 4538 // Check if count is negative (dead code) or zero. 4539 // Note: count uses 64 bits in a 64-bit VM. 4540 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone); 4541 4542 // Use BIS zeroing only for big arrays since it requires membar. 4543 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4544 cmp(count, block_zero_size); 4545 } else { 4546 set(block_zero_size, temp); 4547 cmp(count, temp); 4548 } 4549 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4550 delayed()->add(to, count, end); 4551 4552 // Note: size is >= three (32 bytes) cache lines. 4553 4554 // Clean the beginning of space up to next cache line. 4555 for (int offs = 0; offs < cache_line_size; offs += 8) { 4556 stx(G0, to, offs); 4557 } 4558 4559 // align to next cache line 4560 add(to, cache_line_size, to); 4561 and3(to, -cache_line_size, to); 4562 4563 // Note: size left >= two (32 bytes) cache lines. 4564 4565 // BIS should not be used to zero tail (64 bytes) 4566 // to avoid zeroing a header of the following object. 4567 sub(end, (cache_line_size*2)-8, end); 4568 4569 Label bis_loop; 4570 bind(bis_loop); 4571 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4572 add(to, cache_line_size, to); 4573 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4574 4575 // BIS needs membar. 4576 membar(Assembler::StoreLoad); 4577 4578 add(end, (cache_line_size*2)-8, end); // restore end 4579 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4580 4581 // Clean the tail. 4582 bind(small_loop); 4583 stx(G0, to, 0); 4584 add(to, 8, to); 4585 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4586 nop(); // Separate short branches 4587 }
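// Sketch of the phases above (illustrative summary; L = cache_line_size):
//   1. stx-zero from 'to' up to the next L-aligned address;
//   2. stxa/BIS-zero whole cache lines, stopping roughly 2*L short of 'end'
//      so the block-init stores never touch the following object's header,
//      then issue the membar(StoreLoad) that BIS requires;
//   3. stx-zero the remaining tail in small_loop, 8 bytes at a time.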