/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
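// The displacement widths handled here (30-, 22- and 19-bit word
// displacements, plus the 16- and 10-bit bpr/cbcond forms) mirror the
// encodings in patched_branch() above; inv_wdisp(), inv_wdisp16() and
// inv_wdisp10() are the decoding inverses of the wdisp*() encoders used there.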
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  r = inv_wdisp(inst, pos, 19);  break;
      case bp_op2:   r = inv_wdisp(inst, pos, 19);  break;
      case fb_op2:   r = inv_wdisp(inst, pos, 22);  break;
      case br_op2:   r = inv_wdisp(inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so the VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}


void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
    // Armed page has poll bit set.
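    // A disarmed thread reads a poll word with the poll bit clear and falls
    // through; an armed thread reads a non-zero masked value below and takes
    // the slow path.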
    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
    br_notnull(temp_reg, a, Assembler::pn, slow_path);
  } else {
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    load_contents(sync_state, temp_reg);
    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
    br(Assembler::notEqual, a, Assembler::pn, slow_path);
  }
}

void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is
  // visible, has_last_Java_frame is true and users will look at the rest
  // of the fields. (Note: flags should always be zero before we get here,
  // so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that, if
  // we are doing a call to native (not VM) code, we capture the known pc
  // and don't have to rely on the native call having a standard frame
  // linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloads are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one. In that case, last_java_sp must be passed as FP
// instead of SP.
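//
// A hypothetical use from a stub (sketch only; 'some_entry' stands in for a
// real VM entry point and is not defined in this file):
//   __ save_frame(0);
//   __ call_VM(O0, FP, CAST_FROM_FN_PTR(address, some_entry), G0, true);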


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
  srlx(obj, CardTable::card_shift, obj);
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
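  // (The worst-case sequence emitted above is 7 instructions:
  //  sethi, or, sllx, or, sllx, or, sllx; see insts_for_sethi() below.)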
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
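  // Cases below, in order: a simm13 value needs a single 'or'; a value with
  // the upper 32 bits zero needs sethi(+or); a value fitting in 34 bits uses
  // sethi+sllx(+or); an all-ones upper half uses sethi+xor; a zero low half
  // builds the high half and shifts it up; otherwise both halves are built
  // separately and merged.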
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);          // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);         // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);        // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);          // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
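  // (address_in_saved_window() below names exactly that save slot in the
  //  current register window.)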
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);

}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = ");    os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = ");    os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last+1 < 32; ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last+1 < 32; ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void
MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.
// Must not smash ANY registers, including flags.  May not use a register
// 'save', as this blows the high bits of the O-regs if they contain Long
// values.  Acts as a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();                       // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);           // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
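  // The caller placed the message in O1; after the save_frame above it is
  // visible here as I1, so move it back into O1, where stop_subroutine's
  // own save will turn it into I1 again.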
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();    // one more save to free up another O7 register
  mov(I0, O1);     // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);     // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
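  // The branch below is annulled: its delay-slot set(0, Rresult) executes
  // only when the branch is taken, i.e. when the count went negative and
  // must be clamped to zero.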
  Label no_extras;
  br(negative, true, pt, no_extras);   // if neg, clear reg
  delayed()->set(0, Rresult);          // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
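// The cbcond instruction can encode only a 5-bit signed immediate, hence
// the is_simm(simm13a, 5) guard before the compare-and-branch form below.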
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch version for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}

// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
                           Register Rb_hi, Register Rb_low,
                           Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);              // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.
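  // Each triplet below: an unconditional mov(-1) supplies the 'less'
  // result, then conditional moves overwrite it with 0 on equal or 1 on
  // greater.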

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(                               -1, Rresult);
  movcc(equal,           false, icc,  0, Rresult);
  movcc(greaterUnsigned, false, icc,  1, Rresult);

  bind(done);
}

void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_low
      &&  Rout_low   != Rin_high,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low);     // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);       // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}


void MacroAssembler::lshr( Register Rin_high,  Register Rin_low,
                           Register Rcount,
                           Register Rout_high, Register Rout_low,
                           Register Rtemp ) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_high
      &&  Rout_high  != Rin_low,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting left by 32-count the high
  // register.
This is done by shifting left by 31-count and then by one 1776 // more to take care of the special (rare) case where count is zero 1777 // (shifting by 32 would not work). 1778 1779 neg(Ralt_count); 1780 if (Rcount != Rout_low) { 1781 srl(Rin_low, Rcount, Rout_low); 1782 } 1783 1784 // The order of the next two instructions is critical in the case where 1785 // Rin and Rout are the same and should not be reversed. 1786 1787 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1788 sra(Rin_high, Rcount, Rout_high ); // high half 1789 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1790 if (Rcount == Rout_low) { 1791 srl(Rin_low, Rcount, Rout_low); 1792 } 1793 ba(done); 1794 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1795 1796 // shift >= 32 bits, Ralt_count = Rcount-32 1797 bind(big_shift); 1798 1799 sra(Rin_high, Ralt_count, Rout_low); 1800 sra(Rin_high, 31, Rout_high); // sign into hi 1801 1802 bind( done ); 1803 } 1804 1805 1806 1807 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1808 Register Rcount, 1809 Register Rout_high, Register Rout_low, 1810 Register Rtemp ) { 1811 1812 Register Ralt_count = Rtemp; 1813 Register Rxfer_bits = Rtemp; 1814 1815 assert( Ralt_count != Rin_high 1816 && Ralt_count != Rin_low 1817 && Ralt_count != Rcount 1818 && Rxfer_bits != Rin_low 1819 && Rxfer_bits != Rin_high 1820 && Rxfer_bits != Rcount 1821 && Rxfer_bits != Rout_high 1822 && Rout_high != Rin_low, 1823 "register alias checks"); 1824 1825 Label big_shift, done; 1826 1827 // This code can be optimized to use the 64 bit shifts in V9. 1828 // Here we use the 32 bit shifts. 1829 1830 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1831 subcc(Rcount, 31, Ralt_count); 1832 br(greater, true, pn, big_shift); 1833 delayed()->dec(Ralt_count); 1834 1835 // shift < 32 bits, Ralt_count = Rcount-31 1836 1837 // We get the transfer bits by shifting left by 32-count the high 1838 // register. This is done by shifting left by 31-count and then by one 1839 // more to take care of the special (rare) case where count is zero 1840 // (shifting by 32 would not work). 1841 1842 neg(Ralt_count); 1843 if (Rcount != Rout_low) { 1844 srl(Rin_low, Rcount, Rout_low); 1845 } 1846 1847 // The order of the next two instructions is critical in the case where 1848 // Rin and Rout are the same and should not be reversed. 1849 1850 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1851 srl(Rin_high, Rcount, Rout_high ); // high half 1852 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1853 if (Rcount == Rout_low) { 1854 srl(Rin_low, Rcount, Rout_low); 1855 } 1856 ba(done); 1857 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1858 1859 // shift >= 32 bits, Ralt_count = Rcount-32 1860 bind(big_shift); 1861 1862 srl(Rin_high, Ralt_count, Rout_low); 1863 clr(Rout_high); 1864 1865 bind( done ); 1866 } 1867 1868 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1869 cmp(Ra, Rb); 1870 mov(-1, Rresult); 1871 movcc(equal, false, xcc, 0, Rresult); 1872 movcc(greater, false, xcc, 1, Rresult); 1873 } 1874 1875 1876 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1877 switch (size_in_bytes) { 1878 case 8: ld_long(src, dst); break; 1879 case 4: ld( src, dst); break; 1880 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1881 case 1: is_signed ? 
ldsb(src, dst) : ldub(src, dst); break; 1882 default: ShouldNotReachHere(); 1883 } 1884 } 1885 1886 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1887 switch (size_in_bytes) { 1888 case 8: st_long(src, dst); break; 1889 case 4: st( src, dst); break; 1890 case 2: sth( src, dst); break; 1891 case 1: stb( src, dst); break; 1892 default: ShouldNotReachHere(); 1893 } 1894 } 1895 1896 1897 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1898 FloatRegister Fa, FloatRegister Fb, 1899 Register Rresult) { 1900 if (is_float) { 1901 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1902 } else { 1903 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1904 } 1905 1906 if (unordered_result == 1) { 1907 mov( -1, Rresult); 1908 movcc(f_equal, true, fcc0, 0, Rresult); 1909 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1910 } else { 1911 mov( -1, Rresult); 1912 movcc(f_equal, true, fcc0, 0, Rresult); 1913 movcc(f_greater, true, fcc0, 1, Rresult); 1914 } 1915 } 1916 1917 1918 void MacroAssembler::save_all_globals_into_locals() { 1919 mov(G1,L1); 1920 mov(G2,L2); 1921 mov(G3,L3); 1922 mov(G4,L4); 1923 mov(G5,L5); 1924 mov(G6,L6); 1925 mov(G7,L7); 1926 } 1927 1928 void MacroAssembler::restore_globals_from_locals() { 1929 mov(L1,G1); 1930 mov(L2,G2); 1931 mov(L3,G3); 1932 mov(L4,G4); 1933 mov(L5,G5); 1934 mov(L6,G6); 1935 mov(L7,G7); 1936 } 1937 1938 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1939 Register tmp, 1940 int offset) { 1941 intptr_t value = *delayed_value_addr; 1942 if (value != 0) 1943 return RegisterOrConstant(value + offset); 1944 1945 // load indirectly to solve generation ordering problem 1946 AddressLiteral a(delayed_value_addr); 1947 load_ptr_contents(a, tmp); 1948 1949 #ifdef ASSERT 1950 tst(tmp); 1951 breakpoint_trap(zero, xcc); 1952 #endif 1953 1954 if (offset != 0) 1955 add(tmp, offset, tmp); 1956 1957 return RegisterOrConstant(tmp); 1958 } 1959 1960 1961 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1962 assert(d.register_or_noreg() != G0, "lost side effect"); 1963 if ((s2.is_constant() && s2.as_constant() == 0) || 1964 (s2.is_register() && s2.as_register() == G0)) { 1965 // Do nothing, just move value. 1966 if (s1.is_register()) { 1967 if (d.is_constant()) d = temp; 1968 mov(s1.as_register(), d.as_register()); 1969 return d; 1970 } else { 1971 return s1; 1972 } 1973 } 1974 1975 if (s1.is_register()) { 1976 assert_different_registers(s1.as_register(), temp); 1977 if (d.is_constant()) d = temp; 1978 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1979 return d; 1980 } else { 1981 if (s2.is_register()) { 1982 assert_different_registers(s2.as_register(), temp); 1983 if (d.is_constant()) d = temp; 1984 set(s1.as_constant(), temp); 1985 andn(temp, s2.as_register(), d.as_register()); 1986 return d; 1987 } else { 1988 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1989 return res; 1990 } 1991 } 1992 } 1993 1994 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1995 assert(d.register_or_noreg() != G0, "lost side effect"); 1996 if ((s2.is_constant() && s2.as_constant() == 0) || 1997 (s2.is_register() && s2.as_register() == G0)) { 1998 // Do nothing, just move value. 
1999 if (s1.is_register()) { 2000 if (d.is_constant()) d = temp; 2001 mov(s1.as_register(), d.as_register()); 2002 return d; 2003 } else { 2004 return s1; 2005 } 2006 } 2007 2008 if (s1.is_register()) { 2009 assert_different_registers(s1.as_register(), temp); 2010 if (d.is_constant()) d = temp; 2011 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2012 return d; 2013 } else { 2014 if (s2.is_register()) { 2015 assert_different_registers(s2.as_register(), temp); 2016 if (d.is_constant()) d = temp; 2017 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2018 return d; 2019 } else { 2020 intptr_t res = s1.as_constant() + s2.as_constant(); 2021 return res; 2022 } 2023 } 2024 } 2025 2026 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2027 assert(d.register_or_noreg() != G0, "lost side effect"); 2028 if (!is_simm13(s2.constant_or_zero())) 2029 s2 = (s2.as_constant() & 0xFF); 2030 if ((s2.is_constant() && s2.as_constant() == 0) || 2031 (s2.is_register() && s2.as_register() == G0)) { 2032 // Do nothing, just move value. 2033 if (s1.is_register()) { 2034 if (d.is_constant()) d = temp; 2035 mov(s1.as_register(), d.as_register()); 2036 return d; 2037 } else { 2038 return s1; 2039 } 2040 } 2041 2042 if (s1.is_register()) { 2043 assert_different_registers(s1.as_register(), temp); 2044 if (d.is_constant()) d = temp; 2045 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2046 return d; 2047 } else { 2048 if (s2.is_register()) { 2049 assert_different_registers(s2.as_register(), temp); 2050 if (d.is_constant()) d = temp; 2051 set(s1.as_constant(), temp); 2052 sll_ptr(temp, s2.as_register(), d.as_register()); 2053 return d; 2054 } else { 2055 intptr_t res = s1.as_constant() << s2.as_constant(); 2056 return res; 2057 } 2058 } 2059 } 2060 2061 2062 // Look up the method for a megamorphic invokeinterface call. 2063 // The target method is determined by <intf_klass, itable_index>. 2064 // The receiver klass is in recv_klass. 2065 // On success, the result will be in method_result, and execution falls through. 2066 // On failure, execution transfers to the given label. 2067 void MacroAssembler::lookup_interface_method(Register recv_klass, 2068 Register intf_klass, 2069 RegisterOrConstant itable_index, 2070 Register method_result, 2071 Register scan_temp, 2072 Register sethi_temp, 2073 Label& L_no_such_interface, 2074 bool return_method) { 2075 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2076 assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result, 2077 "caller must use same register for non-constant itable index as for method"); 2078 2079 Label L_no_such_interface_restore; 2080 bool did_save = false; 2081 if (scan_temp == noreg || sethi_temp == noreg) { 2082 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2083 Register intf_2 = intf_klass->is_global() ? 
                                                     intf_klass : L1;
    assert(method_result->is_global(), "must be able to return value");
    scan_temp  = L2;
    sethi_temp = L3;
    save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
    recv_klass = recv_2;
    intf_klass = intf_2;
    did_save = true;
  }

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();

  lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp);
  // %%% We should store the aligned, prescaled offset in the klassoop.
  // Then the next several instructions would fold away.

  int itb_offset = vtable_base;
  int itb_scale  = exact_log2(vtableEntry::size_in_bytes());
  sll(scan_temp, itb_scale,  scan_temp);
  add(scan_temp, itb_offset, scan_temp);
  add(recv_klass, scan_temp, scan_temp);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    RegisterOrConstant itable_offset = itable_index;
    itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
    itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
    add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label L_search, L_found_method;

  for (int peel = 1; peel >= 0; peel--) {
    // %%%% Could load both offset and interface in one ldx, if they were
    // in the opposite order. This would save a load.
    ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);

    // Check that this entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
    delayed()->cmp(method_result, intf_klass);

    if (peel) {
      brx(Assembler::equal,    false, Assembler::pt, L_found_method);
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, L_search);
      // (invert the test to fall through to found_method...)
    }
    delayed()->add(scan_temp, scan_step, scan_temp);

    if (!peel) break;

    bind(L_search);
  }

  bind(L_found_method);

  if (return_method) {
    // Got a hit.
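    // Illustrative sketch of the final load below (C-ish pseudocode,
    // names approximate): the itable is an array of itableOffsetEntry
    // records { interface, offset } placed after the vtable, where
    // 'offset' locates that interface's block of itableMethodEntry
    // records relative to the start of the klass:
    //
    //   method = *(Method**)((address)recv_klass + entry->offset());
    //
    // recv_klass was already biased above by the scaled itable_index
    // (when return_method is set), so only the offset load remains.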
2151 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2152 // scan_temp[-scan_step] points to the vtable offset we need 2153 ito_offset -= scan_step; 2154 lduw(scan_temp, ito_offset, scan_temp); 2155 ld_ptr(recv_klass, scan_temp, method_result); 2156 } 2157 2158 if (did_save) { 2159 Label L_done; 2160 ba(L_done); 2161 delayed()->restore(); 2162 2163 bind(L_no_such_interface_restore); 2164 ba(L_no_such_interface); 2165 delayed()->restore(); 2166 2167 bind(L_done); 2168 } 2169 } 2170 2171 2172 // virtual method calling 2173 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2174 RegisterOrConstant vtable_index, 2175 Register method_result) { 2176 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2177 Register sethi_temp = method_result; 2178 const int base = in_bytes(Klass::vtable_start_offset()) + 2179 // method pointer offset within the vtable entry: 2180 vtableEntry::method_offset_in_bytes(); 2181 RegisterOrConstant vtable_offset = vtable_index; 2182 // Each of the following three lines potentially generates an instruction. 2183 // But the total number of address formation instructions will always be 2184 // at most two, and will often be zero. In any case, it will be optimal. 2185 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2186 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2187 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2188 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2189 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2190 ld_ptr(vtable_entry_addr, method_result); 2191 } 2192 2193 2194 void MacroAssembler::check_klass_subtype(Register sub_klass, 2195 Register super_klass, 2196 Register temp_reg, 2197 Register temp2_reg, 2198 Label& L_success) { 2199 Register sub_2 = sub_klass; 2200 Register sup_2 = super_klass; 2201 if (!sub_2->is_global()) sub_2 = L0; 2202 if (!sup_2->is_global()) sup_2 = L1; 2203 bool did_save = false; 2204 if (temp_reg == noreg || temp2_reg == noreg) { 2205 temp_reg = L2; 2206 temp2_reg = L3; 2207 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2208 sub_klass = sub_2; 2209 super_klass = sup_2; 2210 did_save = true; 2211 } 2212 Label L_failure, L_pop_to_failure, L_pop_to_success; 2213 check_klass_subtype_fast_path(sub_klass, super_klass, 2214 temp_reg, temp2_reg, 2215 (did_save ? &L_pop_to_success : &L_success), 2216 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2217 2218 if (!did_save) 2219 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2220 check_klass_subtype_slow_path(sub_2, sup_2, 2221 L2, L3, L4, L5, 2222 NULL, &L_pop_to_failure); 2223 2224 // on success: 2225 bind(L_pop_to_success); 2226 restore(); 2227 ba_short(L_success); 2228 2229 // on failure: 2230 bind(L_pop_to_failure); 2231 restore(); 2232 bind(L_failure); 2233 } 2234 2235 2236 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2237 Register super_klass, 2238 Register temp_reg, 2239 Register temp2_reg, 2240 Label* L_success, 2241 Label* L_failure, 2242 Label* L_slow_path, 2243 RegisterOrConstant super_check_offset) { 2244 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2245 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2246 2247 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2248 bool need_slow_path = (must_load_sco || 2249 super_check_offset.constant_or_zero() == sco_offset); 2250 2251 assert_different_registers(sub_klass, super_klass, temp_reg); 2252 if (super_check_offset.is_register()) { 2253 assert_different_registers(sub_klass, super_klass, temp_reg, 2254 super_check_offset.as_register()); 2255 } else if (must_load_sco) { 2256 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2257 } 2258 2259 Label L_fallthrough; 2260 int label_nulls = 0; 2261 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2262 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2263 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2264 assert(label_nulls <= 1 || 2265 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2266 "at most one NULL in the batch, usually"); 2267 2268 // If the pointers are equal, we are done (e.g., String[] elements). 2269 // This self-check enables sharing of secondary supertype arrays among 2270 // non-primary types such as array-of-interface. Otherwise, each such 2271 // type would need its own customized SSA. 2272 // We move this check to the front of the fast path because many 2273 // type checks are in fact trivially successful in this manner, 2274 // so we get a nicely predicted branch right at the start of the check. 2275 cmp(super_klass, sub_klass); 2276 brx(Assembler::equal, false, Assembler::pn, *L_success); 2277 delayed()->nop(); 2278 2279 // Check the supertype display: 2280 if (must_load_sco) { 2281 // The super check offset is always positive... 2282 lduw(super_klass, sco_offset, temp2_reg); 2283 super_check_offset = RegisterOrConstant(temp2_reg); 2284 // super_check_offset is register. 2285 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2286 } 2287 ld_ptr(sub_klass, super_check_offset, temp_reg); 2288 cmp(super_klass, temp_reg); 2289 2290 // This check has worked decisively for primary supers. 2291 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2292 // (Secondary supers are interfaces and very deeply nested subtypes.) 2293 // This works in the same check above because of a tricky aliasing 2294 // between the super_cache and the primary super display elements. 2295 // (The 'super_check_addr' can address either, as the case requires.) 2296 // Note that the cache is updated below if it does not help us find 2297 // what we need immediately. 2298 // So if it was a primary super, we can just fail immediately. 
  // Otherwise, it's the slow path for us (no success at this point).

  // Hacked ba(), which may only be used just before L_fallthrough.
#define FINAL_JUMP(label)            \
  if (&(label) != &L_fallthrough) {  \
    ba(label);  delayed()->nop();    \
  }

  if (super_check_offset.is_register()) {
    brx(Assembler::equal, false, Assembler::pn, *L_success);
    delayed()->cmp(super_check_offset.as_register(), sc_offset);

    if (L_failure == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
      delayed()->nop();
      FINAL_JUMP(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_success);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
      delayed()->nop();
      FINAL_JUMP(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_success);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
      delayed()->nop();
      FINAL_JUMP(*L_success);
    }
  }

  bind(L_fallthrough);

#undef FINAL_JUMP
}


void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register count_temp,
                                                   Register scan_temp,
                                                   Register scratch_reg,
                                                   Register coop_reg,
                                                   Label* L_success,
                                                   Label* L_failure) {
  assert_different_registers(sub_klass, super_klass,
                             count_temp, scan_temp, scratch_reg, coop_reg);

  Label L_fallthrough, L_loop;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.

#ifndef PRODUCT
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  inc_counter((address) pst_counter, count_temp, scan_temp);
#endif

  // We will consult the secondary-super array.
  ld_ptr(sub_klass, ss_offset, scan_temp);

  Register search_key = super_klass;

  // Load the array length. (The length is non-negative, so the
  // zero-extending lduw does the right thing on LP64.)
  lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);

  // Check for empty secondary super list
  tst(count_temp);

  // In the array of super classes, elements are pointer-sized.
  int element_size = wordSize;

  // Top of search loop
  bind(L_loop);
  br(Assembler::equal, false, Assembler::pn, *L_failure);
  delayed()->add(scan_temp, element_size, scan_temp);

  // Skip the array header in all array accesses.
  int elem_offset = Array<Klass*>::base_offset_in_bytes();
  elem_offset -= element_size;   // the scan pointer was pre-incremented also

  // Load next super to check
  ld_ptr( scan_temp, elem_offset, scratch_reg );

  // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
  cmp(scratch_reg, search_key);

  // A miss means we are NOT a subtype and need to keep looping
  brx(Assembler::notEqual, false, Assembler::pn, L_loop);
  delayed()->deccc(count_temp); // decrement trip counter in delay slot

  // Success. Cache the super we found and proceed in triumph.
  st_ptr(super_klass, sub_klass, sc_offset);

  if (L_success != &L_fallthrough) {
    ba(*L_success);
    delayed()->nop();
  }

  bind(L_fallthrough);
}


RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                   Register temp_reg,
                                                   int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = extra_slot_offset * stackElementSize;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return offset;
  } else {
    assert(temp_reg != noreg, "must specify");
    sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
    if (offset != 0)
      add(temp_reg, offset, temp_reg);
    return temp_reg;
  }
}


Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int extra_slot_offset) {
  return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
}


void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
                                          Register temp_reg,
                                          Label& done, Label* slow_case,
                                          BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");

  if (PrintBiasedLockingStatistics) {
    assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
    if (counters == NULL)
      counters = BiasedLocking::counters();
  }

  Label cas_label;

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
  cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);

  load_klass(obj_reg, temp_reg);
  ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  or3(G2_thread, temp_reg, temp_reg);
  xor3(mark_reg, temp_reg, temp_reg);
  andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
    // Reload mark_reg as we may need it later
    ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
  }
  brx(Assembler::equal, true, Assembler::pt, done);
  delayed()->nop();

  Label try_revoke_bias;
  Label try_rebias;
  Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
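  // For reference (illustrative; see markOop.hpp): the biased mark word
  // layout these tests rely on is roughly
  //   [ JavaThread* thread | epoch:2 | age:4 | biased_lock:1 | lock:2 ]
  // with biased_lock_pattern == 0b101 in the low three bits. The xor3
  // above therefore leaves set bits only in the fields where the mark
  // differs from (prototype_header | G2_thread), with age masked off.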
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go into the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  delayed()->and3(mark_reg,
                  markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
                  mark_reg);
  or3(G2_thread, mark_reg, temp_reg);
  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  cmp(mark_reg, temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
  }
  if (slow_case != NULL) {
    brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
    delayed()->nop();
  }
  ba_short(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
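  // The rebias attempt below, in illustrative pseudocode:
  //   new_mark = klass->prototype_header() | G2_thread;    // current epoch, our thread
  //   if (CAS(&obj->mark, old_mark, new_mark) != old_mark) goto slow_case;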
2547 load_klass(obj_reg, temp_reg); 2548 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2549 or3(G2_thread, temp_reg, temp_reg); 2550 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2551 // If the biasing toward our thread failed, this means that 2552 // another thread succeeded in biasing it toward itself and we 2553 // need to revoke that bias. The revocation will occur in the 2554 // interpreter runtime in the slow case. 2555 cmp(mark_reg, temp_reg); 2556 if (counters != NULL) { 2557 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2558 } 2559 if (slow_case != NULL) { 2560 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2561 delayed()->nop(); 2562 } 2563 ba_short(done); 2564 2565 bind(try_revoke_bias); 2566 // The prototype mark in the klass doesn't have the bias bit set any 2567 // more, indicating that objects of this data type are not supposed 2568 // to be biased any more. We are going to try to reset the mark of 2569 // this object to the prototype value and fall through to the 2570 // CAS-based locking scheme. Note that if our CAS fails, it means 2571 // that another thread raced us for the privilege of revoking the 2572 // bias of this particular object, so it's okay to continue in the 2573 // normal locking code. 2574 // 2575 // FIXME: due to a lack of registers we currently blow away the age 2576 // bits in this situation. Should attempt to preserve them. 2577 load_klass(obj_reg, temp_reg); 2578 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2579 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2580 // Fall through to the normal CAS-based lock, because no matter what 2581 // the result of the above CAS, some thread must have succeeded in 2582 // removing the bias bit from the object's header. 2583 if (counters != NULL) { 2584 cmp(mark_reg, temp_reg); 2585 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2586 } 2587 2588 bind(cas_label); 2589 } 2590 2591 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2592 bool allow_delay_slot_filling) { 2593 // Check for biased locking unlock case, which is a no-op 2594 // Note: we do not have to check the thread ID for two reasons. 2595 // First, the interpreter checks for IllegalMonitorStateException at 2596 // a higher level. Second, if the bias was revoked while we held the 2597 // lock, the object could not be rebiased toward another thread, so 2598 // the bias bit would be clear. 2599 ld_ptr(mark_addr, temp_reg); 2600 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2601 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2602 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2603 delayed(); 2604 if (!allow_delay_slot_filling) { 2605 nop(); 2606 } 2607 } 2608 2609 2610 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2611 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2612 // The code could be tightened up considerably. 2613 // 2614 // box->dhw disposition - post-conditions at DONE_LABEL. 2615 // - Successful inflated lock: box->dhw != 0. 2616 // Any non-zero value suffices. 2617 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2618 // - Successful Stack-lock: box->dhw == mark. 2619 // box->dhw must contain the displaced mark word value 2620 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
//     The slow-path fast_enter() and slow_enter() operators
//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
//   - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - is
// extremely sensitive to the size of the code emitted by compiler_lock_object
// and compiler_unlock_object. Critically, the key factor is code size, not path
// length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
// effect).


void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
                                          Register Rbox, Register Rscratch,
                                          BiasedLockingCounters* counters,
                                          bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  verify_oop(Roop);
  Label done ;

  if (counters != NULL) {
    inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
  }

  if (EmitSync & 1) {
    mov(3, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    cmp(SP, G0);
    return ;
  }

  if (EmitSync & 2) {

    // Fetch object's markword
    ld_ptr(mark_addr, Rmark);

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
    }

    // Save Rbox in Rscratch to be used for the cas operation
    mov(Rbox, Rscratch);

    // set Rmark to markOop | markOopDesc::unlocked_value
    or3(Rmark, markOopDesc::unlocked_value, Rmark);

    // Initialize the box. (Must happen before we update the object mark!)
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);

    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    // hence we are done
    cmp(Rmark, Rscratch);
    sub(Rscratch, STACK_BIAS, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot

    // we did not find an unlocked object so see if this is a recursive case
    // sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    bind (done);
    return ;
  }

  Label Egress ;

  if (EmitSync & 256) {
    Label IsInflated ;

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated
    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }

    // Store mark into displaced mark field in the on-stack basic-lock "box"
    // Critically, this must happen before the CAS
    // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->

    // Try stack-lock acquisition.
    // Beware: the 1st instruction is in a delay slot
    mov(Rbox,  Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    cmp(Rmark, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);

    // Stack-lock attempt failed - check for recursive stack-lock.
    // See the comments below about how we might remove this case.
    sub(Rscratch, STACK_BIAS, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());

    bind(IsInflated);
    if (EmitSync & 64) {
      // If m->owner != null goto IsLocked
      // Pessimistic form: Test-and-CAS vs CAS
      // The optimistic form avoids RTS->RTO cache line upgrades.
      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::notZero, false, Assembler::pn, done);
      delayed()->nop();
      // m->owner == null : it's unlocked.
    }

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // Intentional fall-through into done
  } else {
    // Aggressively avoid the Store-before-CAS penalty
    // Defer the store into box->dhw until after the CAS
    Label IsInflated, Recursive ;

    // Anticipate CAS -- Avoid RTS->RTO upgrade
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->                         // Beware - dangling delay-slot

    // Try stack-lock acquisition.
    // Transiently install BUSY (0) encoding in the mark word.
    // if the CAS of 0 into the mark was successful then we execute:
    //   ST box->dhw  = mark   -- save fetched mark in on-stack basiclock box
    //   ST obj->mark = box    -- overwrite transient 0 value
    // This presumes TSO, of course.

    mov(0, Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
    cmp(Rscratch, Rmark);
    brx(Assembler::notZero, false, Assembler::pn, Recursive);
    delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    if (counters != NULL) {
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
    }
    ba(done);
    delayed()->st_ptr(Rbox, mark_addr);

    bind(Recursive);
    // Stack-lock attempt failed - check for recursive stack-lock.
    // Tests show that we can remove the recursive case with no impact
    // on refworkload 0.83. If we need to reduce the size of the code
    // emitted by compiler_lock_object() the recursive case is a perfect
    // candidate.
    //
    // A more extreme idea is to always inflate on stack-lock recursion.
    // This lets us eliminate the recursive checks in compiler_lock_object
    // and compiler_unlock_object and the (box->dhw == 0) encoding.
    // A brief experiment - requiring changes to synchronizer.cpp and the
    // interpreter - showed a performance *increase*. In the same experiment
    // I eliminated the fast-path stack-lock code from the interpreter and
    // always passed control to the "slow" operators in synchronizer.cpp.

    // RScratch contains the fetched obj->mark value from the failed CAS.
    sub(Rscratch, STACK_BIAS, Rscratch);
    sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    if (counters != NULL) {
      // Accounting needs the Rscratch register
      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
      ba_short(done);
    } else {
      ba(done);
      delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    }

    bind (IsInflated);

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    andcc(Rscratch, Rscratch, G0);      // set ICCs for done: icc.zf iff success
    // set icc.zf : 1=success 0=failure
    // ST box->displaced_header = NonZero.
    // Any non-zero value suffices:
    //   markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
    st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
    // Intentional fall-through into done
  }

  bind (done);
}

void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
                                            Register Rbox, Register Rscratch,
                                            bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  Label done ;

  if (EmitSync & 4) {
    cmp(SP, G0);
    return ;
  }

  if (EmitSync & 8) {
    if (try_bias) {
      biased_locking_exit(mark_addr, Rscratch, done);
    }

    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    br_null_short(Rmark, Assembler::pt, done);

    // Check if it is still a lightweight lock; this is true if we see
    // the stack address of the basicLock in the markOop of the object
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rbox, Rmark);
    ba(done);
    delayed()->cmp(Rbox, Rmark);
    bind(done);
    return ;
  }

  // Beware ... If the aggregate size of the code emitted by CLO and CUO
  // is too large performance rolls abruptly off a cliff.
  // This could be related to inlining policies, code cache management, or
  // I$ effects.
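  // Illustrative outline of the triage below:
  //   dhw  = box->displaced_header;
  //   mark = obj->mark;
  //   if (dhw == 0)   goto done;        // recursive stack-lock; unlock is a no-op
  //   if (mark & 2)   /* inflated */ ;  // fall into the 1-0 ObjectMonitor release
  //   else            goto LStacked;    // plain stack-lock: CAS the mark back to dhw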
  Label LStacked ;

  if (try_bias) {
    // TODO: eliminate redundant LDs of obj->mark
    biased_locking_exit(mark_addr, Rscratch, done);
  }

  ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  andcc(Rscratch, Rscratch, G0);
  brx(Assembler::zero, false, Assembler::pn, done);
  delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
  andcc(Rmark, 2, G0);
  brx(Assembler::zero, false, Assembler::pt, LStacked);
  delayed()->nop();

  // It's inflated
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into _owner which releases the lock. This prevents loads
  // and stores within the critical section from reordering (floating)
  // past the store that releases the lock. But TSO is a strong memory model
  // and that particular flavor of barrier is a noop, so we can safely elide it.
  // Note that we use 1-0 locking by default for the inflated case. We
  // close the resultant (and rare) race by having contended threads in
  // monitorenter periodically poll _owner.

  if (EmitSync & 1024) {
    // Emit code to check that _owner == Self
    // We could fold the _owner test into subsequent code more efficiently
    // than using a stand-alone check, but since _owner checking is off by
    // default we don't bother. We also might consider predicating the
    // _owner==Self check on Xcheck:jni or running on a debug build.
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch);
    orcc(Rscratch, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
  }

  if (EmitSync & 512) {
    // classic lock release code absent 1-0 locking
    //   m->Owner = null;
    //   membar #storeload
    //   if (m->cxq|m->EntryList) == null goto Success
    //   if (m->succ != null) goto Success
    //   if CAS (&m->Owner,0,Self) != 0 goto Success
    //   goto SlowPath
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
    st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    if (os::is_MP()) { membar(StoreLoad); }
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    brx(Assembler::zero, false, Assembler::pt, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  } else {
    // 1-0 form : avoids CAS and MEMBAR in the common case
    // Do not bother to ratify that m->Owner == Self.
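    // Illustrative outline of the default 1-0 release below (icc.zf is
    // the success flag consumed at done):
    //   if (m->_recursions != 0)            goto done;  // zf=0: slow path
    //   if ((m->_cxq | m->_EntryList) == 0) { m->_owner = 0; goto done; }  // zf=1
    //   m->_owner = 0;  membar #StoreLoad;
    //   if (m->_succ != 0)                  goto done;  // zf=1: successor will run
    //   if (CAS(&m->_owner, 0, Self) != 0)  goto done;  // zf=1: someone else owns it
    //   goto done;                          // zf=0: we reacquired it; take slow path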
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    if (EmitSync & 16384) {
      // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
      // we should transfer control directly to the slow-path.
      // This test makes the reacquire operation below very infrequent.
      // The logic is equivalent to :
      //   if (cxq|EntryList) == null : Owner=null; goto Success
      //   if succ == null : goto SlowPath
      //   Owner=null; membar #storeload
      //   if succ != null : goto Success
      //   if CAS(&Owner,null,Self) != null goto Success
      //   goto SlowPath
      brx(Assembler::zero, true, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
      ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
      andcc(Rscratch, Rscratch, G0) ;
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->orcc(G0, 1, G0);
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    } else {
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    }
    if (os::is_MP()) { membar(StoreLoad); }
    // Check that _succ is (or remains) non-zero
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    // A slightly better v8+/v9 idiom would be the following:
    //   movrnz Rscratch,1,Rscratch
    //   ba done
    //   xorcc Rscratch,1,G0
    // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  }

  bind (LStacked);
  // Consider: we could replace the expensive CAS in the exit
  // path with a simple ST of the displaced mark value fetched from
  // the on-stack basiclock box. That admits a race where a thread T2
  // in the slow lock path -- inflating with monitor M -- could race a
  // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  // More precisely T1 in the stack-lock unlock path could "stomp" the
  // inflated mark value M installed by T2, resulting in an orphan
  // object monitor M and T2 becoming stranded. We can remedy that situation
  // by having T2 periodically poll the object's mark word using timed wait
  // operations. If T2 discovers that a stomp has occurred it vacates
  // the monitor M and wakes any other threads stranded on the now-orphan M.
  // In addition the monitor scavenger, which performs deflation,
  // would also need to check for orphan monitors and stranded threads.
  //
  // Finally, inflation is also used when T2 needs to assign a hashCode
  // to O and O is stack-locked by T1.
The "stomp" race could cause 3016 // an assigned hashCode value to be lost. We can avoid that condition 3017 // and provide the necessary hashCode stability invariants by ensuring 3018 // that hashCode generation is idempotent between copying GCs. 3019 // For example we could compute the hashCode of an object O as 3020 // O's heap address XOR some high quality RNG value that is refreshed 3021 // at GC-time. The monitor scavenger would install the hashCode 3022 // found in any orphan monitors. Again, the mechanism admits a 3023 // lost-update "stomp" WAW race but detects and recovers as needed. 3024 // 3025 // A prototype implementation showed excellent results, although 3026 // the scavenger and timeout code was rather involved. 3027 3028 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3029 cmp(Rbox, Rscratch); 3030 // Intentional fall through into done ... 3031 3032 bind(done); 3033 } 3034 3035 3036 3037 void MacroAssembler::print_CPU_state() { 3038 // %%%%% need to implement this 3039 } 3040 3041 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3042 // %%%%% need to implement this 3043 } 3044 3045 void MacroAssembler::push_IU_state() { 3046 // %%%%% need to implement this 3047 } 3048 3049 3050 void MacroAssembler::pop_IU_state() { 3051 // %%%%% need to implement this 3052 } 3053 3054 3055 void MacroAssembler::push_FPU_state() { 3056 // %%%%% need to implement this 3057 } 3058 3059 3060 void MacroAssembler::pop_FPU_state() { 3061 // %%%%% need to implement this 3062 } 3063 3064 3065 void MacroAssembler::push_CPU_state() { 3066 // %%%%% need to implement this 3067 } 3068 3069 3070 void MacroAssembler::pop_CPU_state() { 3071 // %%%%% need to implement this 3072 } 3073 3074 3075 3076 void MacroAssembler::verify_tlab() { 3077 #ifdef ASSERT 3078 if (UseTLAB && VerifyOops) { 3079 Label next, next2, ok; 3080 Register t1 = L0; 3081 Register t2 = L1; 3082 Register t3 = L2; 3083 3084 save_frame(0); 3085 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3086 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3087 or3(t1, t2, t3); 3088 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3089 STOP("assert(top >= start)"); 3090 should_not_reach_here(); 3091 3092 bind(next); 3093 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3094 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3095 or3(t3, t2, t3); 3096 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3097 STOP("assert(top <= end)"); 3098 should_not_reach_here(); 3099 3100 bind(next2); 3101 and3(t3, MinObjAlignmentInBytesMask, t3); 3102 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3103 STOP("assert(aligned)"); 3104 should_not_reach_here(); 3105 3106 bind(ok); 3107 restore(); 3108 } 3109 #endif 3110 } 3111 3112 3113 void MacroAssembler::eden_allocate( 3114 Register obj, // result: pointer to object after successful allocation 3115 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3116 int con_size_in_bytes, // object size in bytes if known at compile time 3117 Register t1, // temp register 3118 Register t2, // temp register 3119 Label& slow_case // continuation point if fast allocation fails 3120 ){ 3121 // make sure arguments make sense 3122 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3123 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3124 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3125 3126 if (!Universe::heap()->supports_inline_contig_alloc()) { 3127 // No allocation in the shared eden. 3128 ba(slow_case); 3129 delayed()->nop(); 3130 } else { 3131 // get eden boundaries 3132 // note: we need both top & top_addr! 3133 const Register top_addr = t1; 3134 const Register end = t2; 3135 3136 CollectedHeap* ch = Universe::heap(); 3137 set((intx)ch->top_addr(), top_addr); 3138 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3139 ld_ptr(top_addr, delta, end); 3140 ld_ptr(top_addr, 0, obj); 3141 3142 // try to allocate 3143 Label retry; 3144 bind(retry); 3145 #ifdef ASSERT 3146 // make sure eden top is properly aligned 3147 { 3148 Label L; 3149 btst(MinObjAlignmentInBytesMask, obj); 3150 br(Assembler::zero, false, Assembler::pt, L); 3151 delayed()->nop(); 3152 STOP("eden top is not properly aligned"); 3153 bind(L); 3154 } 3155 #endif // ASSERT 3156 const Register free = end; 3157 sub(end, obj, free); // compute amount of free space 3158 if (var_size_in_bytes->is_valid()) { 3159 // size is unknown at compile time 3160 cmp(free, var_size_in_bytes); 3161 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3162 delayed()->add(obj, var_size_in_bytes, end); 3163 } else { 3164 // size is known at compile time 3165 cmp(free, con_size_in_bytes); 3166 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3167 delayed()->add(obj, con_size_in_bytes, end); 3168 } 3169 // Compare obj with the value at top_addr; if still equal, swap the value of 3170 // end with the value at top_addr. If not equal, read the value at top_addr 3171 // into end. 3172 cas_ptr(top_addr, obj, end); 3173 // if someone beat us on the allocation, try again, otherwise continue 3174 cmp(obj, end); 3175 brx(Assembler::notEqual, false, Assembler::pn, retry); 3176 delayed()->mov(end, obj); // nop if successfull since obj == end 3177 3178 #ifdef ASSERT 3179 // make sure eden top is properly aligned 3180 { 3181 Label L; 3182 const Register top_addr = t1; 3183 3184 set((intx)ch->top_addr(), top_addr); 3185 ld_ptr(top_addr, 0, top_addr); 3186 btst(MinObjAlignmentInBytesMask, top_addr); 3187 br(Assembler::zero, false, Assembler::pt, L); 3188 delayed()->nop(); 3189 STOP("eden top is not properly aligned"); 3190 bind(L); 3191 } 3192 #endif // ASSERT 3193 } 3194 } 3195 3196 3197 void MacroAssembler::tlab_allocate( 3198 Register obj, // result: pointer to object after successful allocation 3199 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3200 int con_size_in_bytes, // object size in bytes if known at compile time 3201 Register t1, // temp register 3202 Label& slow_case // continuation point if fast allocation fails 3203 ){ 3204 // make sure arguments make sense 3205 assert_different_registers(obj, var_size_in_bytes, t1); 3206 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3207 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3208 3209 const Register free = t1; 3210 3211 verify_tlab(); 3212 3213 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3214 3215 // calculate amount of free space 3216 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3217 sub(free, obj, free); 3218 3219 Label done; 3220 if (var_size_in_bytes == noreg) { 3221 cmp(free, con_size_in_bytes); 3222 } else { 3223 
cmp(free, var_size_in_bytes); 3224 } 3225 br(Assembler::less, false, Assembler::pn, slow_case); 3226 // calculate the new top pointer 3227 if (var_size_in_bytes == noreg) { 3228 delayed()->add(obj, con_size_in_bytes, free); 3229 } else { 3230 delayed()->add(obj, var_size_in_bytes, free); 3231 } 3232 3233 bind(done); 3234 3235 #ifdef ASSERT 3236 // make sure new free pointer is properly aligned 3237 { 3238 Label L; 3239 btst(MinObjAlignmentInBytesMask, free); 3240 br(Assembler::zero, false, Assembler::pt, L); 3241 delayed()->nop(); 3242 STOP("updated TLAB free is not properly aligned"); 3243 bind(L); 3244 } 3245 #endif // ASSERT 3246 3247 // update the tlab top pointer 3248 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3249 verify_tlab(); 3250 } 3251 3252 void MacroAssembler::zero_memory(Register base, Register index) { 3253 assert_different_registers(base, index); 3254 Label loop; 3255 bind(loop); 3256 subcc(index, HeapWordSize, index); 3257 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 3258 delayed()->st_ptr(G0, base, index); 3259 } 3260 3261 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3262 Register t1, Register t2) { 3263 // Bump total bytes allocated by this thread 3264 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3265 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3266 // v8 support has gone the way of the dodo 3267 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3268 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3269 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3270 } 3271 3272 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3273 switch (cond) { 3274 // Note some conditions are synonyms for others 3275 case Assembler::never: return Assembler::always; 3276 case Assembler::zero: return Assembler::notZero; 3277 case Assembler::lessEqual: return Assembler::greater; 3278 case Assembler::less: return Assembler::greaterEqual; 3279 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3280 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3281 case Assembler::negative: return Assembler::positive; 3282 case Assembler::overflowSet: return Assembler::overflowClear; 3283 case Assembler::always: return Assembler::never; 3284 case Assembler::notZero: return Assembler::zero; 3285 case Assembler::greater: return Assembler::lessEqual; 3286 case Assembler::greaterEqual: return Assembler::less; 3287 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3288 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3289 case Assembler::positive: return Assembler::negative; 3290 case Assembler::overflowClear: return Assembler::overflowSet; 3291 } 3292 3293 ShouldNotReachHere(); return Assembler::overflowClear; 3294 } 3295 3296 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3297 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3298 Condition negated_cond = negate_condition(cond); 3299 Label L; 3300 brx(negated_cond, false, Assembler::pt, L); 3301 delayed()->nop(); 3302 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3303 bind(L); 3304 } 3305 3306 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3307 AddressLiteral addrlit(counter_addr); 3308 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 
3309 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 3310 ld(addr, Rtmp2); 3311 inc(Rtmp2); 3312 st(Rtmp2, addr); 3313 } 3314 3315 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3316 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3317 } 3318 3319 SkipIfEqual::SkipIfEqual( 3320 MacroAssembler* masm, Register temp, const bool* flag_addr, 3321 Assembler::Condition condition) { 3322 _masm = masm; 3323 AddressLiteral flag(flag_addr); 3324 _masm->sethi(flag, temp); 3325 _masm->ldub(temp, flag.low10(), temp); 3326 _masm->tst(temp); 3327 _masm->br(condition, false, Assembler::pt, _label); 3328 _masm->delayed()->nop(); 3329 } 3330 3331 SkipIfEqual::~SkipIfEqual() { 3332 _masm->bind(_label); 3333 } 3334 3335 3336 // Writes to stack successive pages until offset reached to check for 3337 // stack overflow + shadow pages. This clobbers tsp and scratch. 3338 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3339 Register Rscratch) { 3340 // Use stack pointer in temp stack pointer 3341 mov(SP, Rtsp); 3342 3343 // Bang stack for total size given plus stack shadow page size. 3344 // Bang one page at a time because a large size can overflow yellow and 3345 // red zones (the bang will fail but stack overflow handling can't tell that 3346 // it was a stack overflow bang vs a regular segv). 3347 int offset = os::vm_page_size(); 3348 Register Roffset = Rscratch; 3349 3350 Label loop; 3351 bind(loop); 3352 set((-offset)+STACK_BIAS, Rscratch); 3353 st(G0, Rtsp, Rscratch); 3354 set(offset, Roffset); 3355 sub(Rsize, Roffset, Rsize); 3356 cmp(Rsize, G0); 3357 br(Assembler::greater, false, Assembler::pn, loop); 3358 delayed()->sub(Rtsp, Roffset, Rtsp); 3359 3360 // Bang down shadow pages too. 3361 // At this point, (tmp-0) is the last address touched, so don't 3362 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3363 // was post-decremented.) Skip this address by starting at i=1, and 3364 // touch a few more pages below. N.B. It is important to touch all 3365 // the way down to and including i=StackShadowPages. 3366 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3367 set((-i*offset)+STACK_BIAS, Rscratch); 3368 st(G0, Rtsp, Rscratch); 3369 } 3370 } 3371 3372 void MacroAssembler::reserved_stack_check() { 3373 // testing if reserved zone needs to be enabled 3374 Label no_reserved_zone_enabling; 3375 3376 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3377 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3378 3379 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3380 3381 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3382 jump_to(stub, G4_scratch); 3383 delayed()->restore(); 3384 3385 should_not_reach_here(); 3386 3387 bind(no_reserved_zone_enabling); 3388 } 3389 3390 /////////////////////////////////////////////////////////////////////////////////// 3391 #if INCLUDE_ALL_GCS 3392 3393 static address satb_log_enqueue_with_frame = NULL; 3394 static u_char* satb_log_enqueue_with_frame_end = NULL; 3395 3396 static address satb_log_enqueue_frameless = NULL; 3397 static u_char* satb_log_enqueue_frameless_end = NULL; 3398 3399 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? 
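// The stub generated below pushes pre_val onto the current thread's SATB mark
// queue, refilling the buffer when it is full. As a sketch (illustrative C,
// not compiled here), the queue protocol implemented by the assembly is:
//
//   while (queue->_index == 0) {
//     SATBMarkQueueSet::handle_zero_index_for_thread(thread); // refill/process
//   }
//   queue->_index -= sizeof(void*);               // _index counts down in bytes
//   queue->_buf[queue->_index / sizeof(void*)] = pre_val;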

static void generate_satb_log_enqueue(bool with_frame) {
  BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);

#define __ masm.

  address start = __ pc();
  Register pre_val;

  Label refill, restart;
  if (with_frame) {
    __ save_frame(0);
    pre_val = I0;  // Was O0 before the save.
  } else {
    pre_val = O0;
  }

  int satb_q_index_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_index());

  int satb_q_buf_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_buf());

  assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) &&
         in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t),
         "check sizes in assembly below");

  __ bind(restart);

  // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t
  // so ld_ptr is appropriate.
  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val (I0 or O0)
  if (!with_frame) {
    // Use return-from-leaf
    __ retl();
    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  } else {
    // Not delayed.
    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  }
  if (with_frame) {
    __ ret();
    __ delayed()->restore();
  }
  __ bind(refill);

  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &SATBMarkQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L0);
  __ mov(G3_scratch, L1);
  __ mov(G4, L2);
  // We need the value of O0 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O0, L3);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);
  __ call_VM_leaf(L5, handle_zero, G2_thread);
  __ mov(L0, G1_scratch);
  __ mov(L1, G3_scratch);
  __ mov(L2, G4);
  __ mov(L3, O0);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  if (with_frame) {
    satb_log_enqueue_with_frame = start;
    satb_log_enqueue_with_frame_end = __ pc();
  } else {
    satb_log_enqueue_frameless = start;
    satb_log_enqueue_frameless_end = __ pc();
  }

#undef __
}

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Is marking active?
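  // (In outline, a sketch of the SATB pre-barrier rather than compiled code:
  //    if (thread->satb_mark_queue().is_active()) {
  //      oop pre_val = (obj != noreg) ? *field : the pre_val passed in;
  //      if (pre_val != NULL) enqueue(pre_val);
  //    }
  //  The width of the _active flag is platform-dependent, hence the two
  //  load variants below.)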
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                SATBMarkQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  SATBMarkQueue::byte_offset_of_active()),
         tmp);
  }

  // If marking is not active, skip the barrier.
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg. In the former case, do a normal call. In the
  // latter, do a save here and call the frameless version.

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}

static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This stub assumes that O0 contains the address being stored to.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

  __ srlx(O0, CardTable::card_shift, O0);
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // Reached via the young-card branch above, or by falling through when the
  // card is already dirty: either way there is nothing more to do, so return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0);  // [cardPtr] := 0 (i.e., dirty).
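  // (To recap the filtering above, as a sketch rather than compiled code:
  //    jbyte card = *card_ptr;
  //    if (card == g1_young_card_val()) return;   // young cards need no refinement
  //    StoreLoad;                                 // order the oop store and the re-read
  //    if (*card_ptr == dirty_card_val()) return; // lost the race, already dirty
  //    *card_ptr = dirty_card_val();              // done above; enqueue card_ptr below
  //  The enqueue mirrors the SATB queue push earlier in this file.)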

  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so? Do an experiment...

#undef __

}

void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {

  Label filtered;
  MacroAssembler* post_filter_masm = this;

  if (new_val == G0) return;

  G1BarrierSet* bs =
    barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
  CardTable* ct = bs->card_table();

  if (G1RSBarrierRegionFilter) {
    xor3(store_addr, new_val, tmp);
    srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);

    // XXX Should I predict this taken or not? Does it matter?
    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
  }

  // If the "store_addr" register is an "in" or "local" register, move it to
  // a scratch reg so we can pass it as an argument.
  bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  // Pick a scratch register different from "tmp".
  Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  // Make sure we use up the delay slot!
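  // (For reference, the G1RSBarrierRegionFilter test above amounts to
  //    if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) goto filtered;
  //  i.e. a store whose source and target lie in the same heap region needs
  //  no remembered-set update.)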
3690 if (use_scr) { 3691 post_filter_masm->mov(store_addr, scr); 3692 } else { 3693 post_filter_masm->nop(); 3694 } 3695 save_frame(0); 3696 call(dirty_card_log_enqueue); 3697 if (use_scr) { 3698 delayed()->mov(scr, O0); 3699 } else { 3700 delayed()->mov(store_addr->after_save(), O0); 3701 } 3702 restore(); 3703 3704 bind(filtered); 3705 } 3706 3707 // Called from init_globals() after universe_init() and before interpreter_init() 3708 void g1_barrier_stubs_init() { 3709 CollectedHeap* heap = Universe::heap(); 3710 if (heap->kind() == CollectedHeap::G1) { 3711 // Only needed for G1 3712 if (dirty_card_log_enqueue == 0) { 3713 G1BarrierSet* bs = 3714 barrier_set_cast<G1BarrierSet>(heap->barrier_set()); 3715 CardTable *ct = bs->card_table(); 3716 generate_dirty_card_log_enqueue(ct->byte_map_base()); 3717 assert(dirty_card_log_enqueue != 0, "postcondition."); 3718 } 3719 if (satb_log_enqueue_with_frame == 0) { 3720 generate_satb_log_enqueue(true); 3721 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 3722 } 3723 if (satb_log_enqueue_frameless == 0) { 3724 generate_satb_log_enqueue(false); 3725 assert(satb_log_enqueue_frameless != 0, "postcondition."); 3726 } 3727 } 3728 } 3729 3730 #endif // INCLUDE_ALL_GCS 3731 /////////////////////////////////////////////////////////////////////////////////// 3732 3733 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3734 // If we're writing constant NULL, we can skip the write barrier. 3735 if (new_val == G0) return; 3736 CardTableBarrierSet* bs = 3737 barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); 3738 CardTable* ct = bs->card_table(); 3739 3740 assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); 3741 card_table_write(ct->byte_map_base(), tmp, store_addr); 3742 } 3743 3744 // ((OopHandle)result).resolve(); 3745 void MacroAssembler::resolve_oop_handle(Register result) { 3746 // OopHandle::resolve is an indirection. 3747 ld_ptr(result, 0, result); 3748 } 3749 3750 void MacroAssembler::load_mirror(Register mirror, Register method) { 3751 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3752 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3753 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3754 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3755 ld_ptr(mirror, mirror_offset, mirror); 3756 resolve_oop_handle(mirror); 3757 } 3758 3759 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3760 // The number of bytes in this code is used by 3761 // MachCallDynamicJavaNode::ret_addr_offset() 3762 // if this changes, change that. 
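  // (Sketch of the two cases below, illustrative only:
  //    narrowKlass nk = *(u4*)(oop + klass_offset);  klass = decode(nk);  // compressed
  //    klass = *(Klass**)(oop + klass_offset);                            // uncompressed
  //  where decode() stands for decode_klass_not_null.)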
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
  else                  load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // could be moved before the branch, with the delay slot annulled,
    // but that may add some unneeded work decoding null
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}


void
MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
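  // (Encode and decode are inverses, as a sketch:
  //    narrow = (oop - narrow_oop_base) >> LogMinObjAlignmentInBytes;
  //    oop    = (narrow << LogMinObjAlignmentInBytes) + narrow_oop_base;
  //  with a NULL base both collapse to a bare shift, as in the code below.)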
3933 assert (UseCompressedOops, "must be compressed"); 3934 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3935 sllx(src, LogMinObjAlignmentInBytes, dst); 3936 if (Universe::narrow_oop_base() != NULL) 3937 add(dst, G6_heapbase, dst); 3938 } 3939 3940 void MacroAssembler::encode_klass_not_null(Register r) { 3941 assert (UseCompressedClassPointers, "must be compressed"); 3942 if (Universe::narrow_klass_base() != NULL) { 3943 assert(r != G6_heapbase, "bad register choice"); 3944 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 3945 sub(r, G6_heapbase, r); 3946 if (Universe::narrow_klass_shift() != 0) { 3947 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3948 srlx(r, LogKlassAlignmentInBytes, r); 3949 } 3950 reinit_heapbase(); 3951 } else { 3952 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3953 srlx(r, Universe::narrow_klass_shift(), r); 3954 } 3955 } 3956 3957 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 3958 if (src == dst) { 3959 encode_klass_not_null(src); 3960 } else { 3961 assert (UseCompressedClassPointers, "must be compressed"); 3962 if (Universe::narrow_klass_base() != NULL) { 3963 set((intptr_t)Universe::narrow_klass_base(), dst); 3964 sub(src, dst, dst); 3965 if (Universe::narrow_klass_shift() != 0) { 3966 srlx(dst, LogKlassAlignmentInBytes, dst); 3967 } 3968 } else { 3969 // shift src into dst 3970 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3971 srlx(src, Universe::narrow_klass_shift(), dst); 3972 } 3973 } 3974 } 3975 3976 // Function instr_size_for_decode_klass_not_null() counts the instructions 3977 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 3978 // the instructions they generate change, then this method needs to be updated. 3979 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3980 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 3981 int num_instrs = 1; // shift src,dst or add 3982 if (Universe::narrow_klass_base() != NULL) { 3983 // set + add + set 3984 num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 3985 insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 3986 if (Universe::narrow_klass_shift() != 0) { 3987 num_instrs += 1; // sllx 3988 } 3989 } 3990 return num_instrs * BytesPerInstWord; 3991 } 3992 3993 // !!! If the instructions that get generated here change then function 3994 // instr_size_for_decode_klass_not_null() needs to get updated. 3995 void MacroAssembler::decode_klass_not_null(Register r) { 3996 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3997 // pd_code_size_limit. 
3998 assert (UseCompressedClassPointers, "must be compressed"); 3999 if (Universe::narrow_klass_base() != NULL) { 4000 assert(r != G6_heapbase, "bad register choice"); 4001 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4002 if (Universe::narrow_klass_shift() != 0) 4003 sllx(r, LogKlassAlignmentInBytes, r); 4004 add(r, G6_heapbase, r); 4005 reinit_heapbase(); 4006 } else { 4007 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4008 sllx(r, Universe::narrow_klass_shift(), r); 4009 } 4010 } 4011 4012 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 4013 if (src == dst) { 4014 decode_klass_not_null(src); 4015 } else { 4016 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4017 // pd_code_size_limit. 4018 assert (UseCompressedClassPointers, "must be compressed"); 4019 if (Universe::narrow_klass_base() != NULL) { 4020 if (Universe::narrow_klass_shift() != 0) { 4021 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 4022 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4023 sllx(src, LogKlassAlignmentInBytes, dst); 4024 add(dst, G6_heapbase, dst); 4025 reinit_heapbase(); 4026 } else { 4027 set((intptr_t)Universe::narrow_klass_base(), dst); 4028 add(src, dst, dst); 4029 } 4030 } else { 4031 // shift/mov src into dst. 4032 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4033 sllx(src, Universe::narrow_klass_shift(), dst); 4034 } 4035 } 4036 } 4037 4038 void MacroAssembler::reinit_heapbase() { 4039 if (UseCompressedOops || UseCompressedClassPointers) { 4040 if (Universe::heap() != NULL) { 4041 set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 4042 } else { 4043 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 4044 load_ptr_contents(base, G6_heapbase); 4045 } 4046 } 4047 } 4048 4049 #ifdef COMPILER2 4050 4051 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. 
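// The VIS3 fast path below requires 8-byte alignment of both src and dst and
// at least 8 chars remaining; otherwise it falls through to the scalar
// version. A sketch of one iteration: load 16 bytes (8 chars); OR the two
// 8-byte words and test against 0xff00ff00ff00ff00 — any set high byte means
// a non-latin1 char, so bail out with result = 0; otherwise bshuffle packs
// the low bytes of the 8 chars into a single 8-byte store.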
4052 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, 4053 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 4054 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { 4055 Label Lloop, Lslow; 4056 assert(UseVIS >= 3, "VIS3 is required"); 4057 assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); 4058 assert_different_registers(ftmp1, ftmp2, ftmp3); 4059 4060 // Check if cnt >= 8 (= 16 bytes) 4061 cmp(cnt, 8); 4062 br(Assembler::less, false, Assembler::pn, Lslow); 4063 delayed()->mov(cnt, result); // copy count 4064 4065 // Check for 8-byte alignment of src and dst 4066 or3(src, dst, tmp1); 4067 andcc(tmp1, 7, G0); 4068 br(Assembler::notZero, false, Assembler::pn, Lslow); 4069 delayed()->nop(); 4070 4071 // Set mask for bshuffle instruction 4072 Register mask = tmp4; 4073 set(0x13579bdf, mask); 4074 bmask(mask, G0, G0); 4075 4076 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters 4077 Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 4078 add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 4079 sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 4080 or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 4081 4082 // Load first 8 bytes 4083 ldx(src, 0, tmp1); 4084 4085 bind(Lloop); 4086 // Load next 8 bytes 4087 ldx(src, 8, tmp2); 4088 4089 // Check for non-latin1 character by testing if the most significant byte of a char is set. 4090 // Although we have to move the data between integer and floating point registers, this is 4091 // still faster than the corresponding VIS instructions (ford/fand/fcmpd). 4092 or3(tmp1, tmp2, tmp3); 4093 btst(tmp3, mask); 4094 // annul zeroing if branch is not taken to preserve original count 4095 brx(Assembler::notZero, true, Assembler::pn, Ldone); 4096 delayed()->mov(G0, result); // 0 - failed 4097 4098 // Move bytes into float register 4099 movxtod(tmp1, ftmp1); 4100 movxtod(tmp2, ftmp2); 4101 4102 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 4103 bshuffle(ftmp1, ftmp2, ftmp3); 4104 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 4105 4106 // Increment addresses and decrement count 4107 inc(src, 16); 4108 inc(dst, 8); 4109 dec(cnt, 8); 4110 4111 cmp(cnt, 8); 4112 // annul LDX if branch is not taken to prevent access past end of string 4113 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 4114 delayed()->ldx(src, 0, tmp1); 4115 4116 // Fallback to slow version 4117 bind(Lslow); 4118 } 4119 4120 // Compress char[] to byte[]. Return 0 on failure. 4121 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { 4122 Label Lloop; 4123 assert_different_registers(src, dst, cnt, tmp, result); 4124 4125 lduh(src, 0, tmp); 4126 4127 bind(Lloop); 4128 inc(src, sizeof(jchar)); 4129 cmp(tmp, 0xff); 4130 // annul zeroing if branch is not taken to preserve original count 4131 br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc 4132 delayed()->mov(G0, result); // 0 - failed 4133 deccc(cnt); 4134 stb(tmp, dst, 0); 4135 inc(dst); 4136 // annul LDUH if branch is not taken to prevent access past end of string 4137 br(Assembler::notZero, true, Assembler::pt, Lloop); 4138 delayed()->lduh(src, 0, tmp); // hoisted 4139 } 4140 4141 // Inflate byte[] to char[] by inflating 16 bytes at once. 
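// As in string_compress_16, the fast path needs VIS3 and 8-byte alignment of
// src and dst. fpmerge interleaves a zero byte with each source byte, so
// (sketch) input bytes b0..b7 become the chars 0x00b0 .. 0x00b7, written as
// two 8-byte stores per iteration.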
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                                       FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp);
  assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp);
  andcc(tmp, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  // Initialize float register to zero
  FloatRegister zerof = ftmp4;
  delayed()->fzero(FloatRegisterImpl::D, zerof);

  // Load first 8 bytes
  ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  bind(Lloop);
  inc(src, 8);
  dec(cnt, 8);

  // Inflate the string by interleaving each byte from the source array
  // with a zero byte and storing the result in the destination array.
  fpmerge(zerof, ftmp1->successor(), ftmp2);
  stf(FloatRegisterImpl::D, ftmp2, dst, 8);
  fpmerge(zerof, ftmp1, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  inc(dst, 16);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
  Label Loop;
  assert_different_registers(src, dst, cnt, tmp);

  ldub(src, 0, tmp);
  bind(Loop);
  inc(src);
  deccc(cnt);
  sth(tmp, dst, 0);
  inc(dst, sizeof(jchar));
  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Loop);
  delayed()->ldub(src, 0, tmp); // hoisted
}

void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    Register tmp1, Register tmp2,
                                    Register result, int ae) {
  Label Ldone, Lloop;
  assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
  int stride1, stride2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
  // we interchange str1 and str2 in the UL case and negate the result.
  // This way, str1 is always latin1 encoded, except for the UU case.

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    srl(cnt2, 1, cnt2);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
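  // (compareTo semantics, as a sketch: scan up to min(len1, len2) chars; on
  //  the first mismatch return chr1 - chr2, otherwise return len1 - len2.
  //  The difference saved here supplies that final tie-breaker.)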
4221 Label Lskip; 4222 Register diff = tmp1; 4223 subcc(cnt1, cnt2, diff); 4224 br(Assembler::greater, true, Assembler::pt, Lskip); 4225 // cnt2 is shorter, so use its count: 4226 delayed()->mov(cnt2, cnt1); 4227 bind(Lskip); 4228 4229 // Rename registers 4230 Register limit1 = cnt1; 4231 Register limit2 = limit1; 4232 Register chr1 = result; 4233 Register chr2 = cnt2; 4234 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4235 // We need an additional register to keep track of two limits 4236 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); 4237 limit2 = tmp2; 4238 } 4239 4240 // Is the minimum length zero? 4241 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity 4242 br(Assembler::equal, true, Assembler::pn, Ldone); 4243 // result is difference in lengths 4244 if (ae == StrIntrinsicNode::UU) { 4245 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4246 } else { 4247 delayed()->mov(diff, result); 4248 } 4249 4250 // Load first characters 4251 if (ae == StrIntrinsicNode::LL) { 4252 stride1 = stride2 = sizeof(jbyte); 4253 ldub(str1, 0, chr1); 4254 ldub(str2, 0, chr2); 4255 } else if (ae == StrIntrinsicNode::UU) { 4256 stride1 = stride2 = sizeof(jchar); 4257 lduh(str1, 0, chr1); 4258 lduh(str2, 0, chr2); 4259 } else { 4260 stride1 = sizeof(jbyte); 4261 stride2 = sizeof(jchar); 4262 ldub(str1, 0, chr1); 4263 lduh(str2, 0, chr2); 4264 } 4265 4266 // Compare first characters 4267 subcc(chr1, chr2, chr1); 4268 br(Assembler::notZero, false, Assembler::pt, Ldone); 4269 assert(chr1 == result, "result must be pre-placed"); 4270 delayed()->nop(); 4271 4272 // Check if the strings start at same location 4273 cmp(str1, str2); 4274 brx(Assembler::equal, true, Assembler::pn, Ldone); 4275 delayed()->mov(G0, result); // result is zero 4276 4277 // We have no guarantee that on 64 bit the higher half of limit is 0 4278 signx(limit1); 4279 4280 // Get limit 4281 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4282 sll(limit1, 1, limit2); 4283 subcc(limit2, stride2, chr2); 4284 } 4285 subcc(limit1, stride1, chr1); 4286 br(Assembler::zero, true, Assembler::pn, Ldone); 4287 // result is difference in lengths 4288 if (ae == StrIntrinsicNode::UU) { 4289 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4290 } else { 4291 delayed()->mov(diff, result); 4292 } 4293 4294 // Shift str1 and str2 to the end of the arrays, negate limit 4295 add(str1, limit1, str1); 4296 add(str2, limit2, str2); 4297 neg(chr1, limit1); // limit1 = -(limit1-stride1) 4298 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4299 neg(chr2, limit2); // limit2 = -(limit2-stride2) 4300 } 4301 4302 // Compare the rest of the characters 4303 load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); 4304 4305 bind(Lloop); 4306 load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); 4307 4308 subcc(chr1, chr2, chr1); 4309 br(Assembler::notZero, false, Assembler::pt, Ldone); 4310 assert(chr1 == result, "result must be pre-placed"); 4311 delayed()->inccc(limit1, stride1); 4312 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4313 inccc(limit2, stride2); 4314 } 4315 4316 // annul LDUB if branch is not taken to prevent access past end of string 4317 br(Assembler::notZero, true, Assembler::pt, Lloop); 4318 delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 
2 : 1, false); 4319 4320 // If strings are equal up to min length, return the length difference. 4321 if (ae == StrIntrinsicNode::UU) { 4322 // Divide by 2 to get number of chars 4323 sra(diff, 1, result); 4324 } else { 4325 mov(diff, result); 4326 } 4327 4328 // Otherwise, return the difference between the first mismatched chars. 4329 bind(Ldone); 4330 if(ae == StrIntrinsicNode::UL) { 4331 // Negate result (see note above) 4332 neg(result); 4333 } 4334 } 4335 4336 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 4337 Register limit, Register tmp, Register result, bool is_byte) { 4338 Label Ldone, Lloop, Lremaining; 4339 assert_different_registers(ary1, ary2, limit, tmp, result); 4340 4341 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4342 int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 4343 assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); 4344 4345 if (is_array_equ) { 4346 // return true if the same array 4347 cmp(ary1, ary2); 4348 brx(Assembler::equal, true, Assembler::pn, Ldone); 4349 delayed()->mov(1, result); // equal 4350 4351 br_null(ary1, true, Assembler::pn, Ldone); 4352 delayed()->clr(result); // not equal 4353 4354 br_null(ary2, true, Assembler::pn, Ldone); 4355 delayed()->clr(result); // not equal 4356 4357 // load the lengths of arrays 4358 ld(Address(ary1, length_offset), limit); 4359 ld(Address(ary2, length_offset), tmp); 4360 4361 // return false if the two arrays are not equal length 4362 cmp(limit, tmp); 4363 br(Assembler::notEqual, true, Assembler::pn, Ldone); 4364 delayed()->clr(result); // not equal 4365 } 4366 4367 cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); 4368 delayed()->mov(1, result); // zero-length arrays are equal 4369 4370 if (is_array_equ) { 4371 // load array addresses 4372 add(ary1, base_offset, ary1); 4373 add(ary2, base_offset, ary2); 4374 // set byte count 4375 if (!is_byte) { 4376 sll(limit, exact_log2(sizeof(jchar)), limit); 4377 } 4378 } else { 4379 // We have no guarantee that on 64 bit the higher half of limit is 0 4380 signx(limit); 4381 } 4382 4383 #ifdef ASSERT 4384 // Sanity check for doubleword (8-byte) alignment of ary1 and ary2. 4385 // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). 4386 Label Laligned; 4387 or3(ary1, ary2, tmp); 4388 andcc(tmp, 7, tmp); 4389 br_null_short(tmp, Assembler::pn, Laligned); 4390 STOP("First array element is not 8-byte aligned."); 4391 should_not_reach_here(); 4392 bind(Laligned); 4393 #endif 4394 4395 // Shift ary1 and ary2 to the end of the arrays, negate limit 4396 add(ary1, limit, ary1); 4397 add(ary2, limit, ary2); 4398 neg(limit, limit); 4399 4400 // MAIN LOOP 4401 // Load and compare array elements of size 'byte_width' until the elements are not 4402 // equal or we reached the end of the arrays. If the size of the arrays is not a 4403 // multiple of 'byte_width', we simply read over the end of the array, bail out and 4404 // compare the remaining bytes below by skipping the garbage bytes. 
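  // (Tail sketch: on exit from the loop, 'limit' counts the garbage bytes
  //  included in the last 8-byte load; shifting both words right by 8*limit
  //  bits discards exactly those bytes — SPARC is big-endian, so the valid
  //  bytes occupy the high-order end of each word.)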
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}

void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // test for negative bytes in input string of a given size
  // result 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2);   // note: the sethi macro fails to emit the optimal sequence here
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
  sra(size, 0, size);
  andcc(inp, 0x7, i);                 // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);       // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);          // annulled, so i is not clobbered for what follows
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);                   // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);                     // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);                   // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);          // annulled, so i is not clobbered for what follows
  add(size, -8, t5);                  // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);                      // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //   of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);          // annulled, so i is not clobbered for what follows
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);                 // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);          // annulled, so i is not clobbered for what follows
  ldx(inp, i, t3);                    // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);                     // ** i clobbered at this point
  sll(t4, 3, t4);                     // bits beyond end of string -> t4
  srlx(t3, t4, t3);                   // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}

#endif


// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  assert(cache_line_size > 0, "cache line size should be known for this code");
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note: count is 64-bit in a 64-bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
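  // (BIS = Block Init Store: stxa with ASI_ST_BLKINIT_PRIMARY initializes a
  //  whole cache line without first fetching it from memory. That is why the
  //  region zeroed this way must be cache-line aligned, and why a membar is
  //  needed before other accesses can rely on the stores.)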
4557 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4558 cmp(count, block_zero_size); 4559 } else { 4560 set(block_zero_size, temp); 4561 cmp(count, temp); 4562 } 4563 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4564 delayed()->add(to, count, end); 4565 4566 // Note: size is >= three (32 bytes) cache lines. 4567 4568 // Clean the beginning of space up to next cache line. 4569 for (int offs = 0; offs < cache_line_size; offs += 8) { 4570 stx(G0, to, offs); 4571 } 4572 4573 // align to next cache line 4574 add(to, cache_line_size, to); 4575 and3(to, -cache_line_size, to); 4576 4577 // Note: size left >= two (32 bytes) cache lines. 4578 4579 // BIS should not be used to zero tail (64 bytes) 4580 // to avoid zeroing a header of the following object. 4581 sub(end, (cache_line_size*2)-8, end); 4582 4583 Label bis_loop; 4584 bind(bis_loop); 4585 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4586 add(to, cache_line_size, to); 4587 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4588 4589 // BIS needs membar. 4590 membar(Assembler::StoreLoad); 4591 4592 add(end, (cache_line_size*2)-8, end); // restore end 4593 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4594 4595 // Clean the tail. 4596 bind(small_loop); 4597 stx(G0, to, 0); 4598 add(to, 8, to); 4599 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4600 nop(); // Separate short branches 4601 } 4602 4603 /** 4604 * Update CRC-32[C] with a byte value according to constants in table 4605 * 4606 * @param [in,out]crc Register containing the crc. 4607 * @param [in]val Register containing the byte to fold into the CRC. 4608 * @param [in]table Register containing the table of crc constants. 
4609 * 4610 * uint32_t crc; 4611 * val = crc_table[(val ^ crc) & 0xFF]; 4612 * crc = val ^ (crc >> 8); 4613 */ 4614 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4615 xor3(val, crc, val); 4616 and3(val, 0xFF, val); 4617 sllx(val, 2, val); 4618 lduw(table, val, val); 4619 srlx(crc, 8, crc); 4620 xor3(val, crc, crc); 4621 } 4622 4623 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4624 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4625 srlx(src, 24, dst); 4626 4627 sllx(src, 32+8, tmp); 4628 srlx(tmp, 32+24, tmp); 4629 sllx(tmp, 8, tmp); 4630 or3(dst, tmp, dst); 4631 4632 sllx(src, 32+16, tmp); 4633 srlx(tmp, 32+24, tmp); 4634 sllx(tmp, 16, tmp); 4635 or3(dst, tmp, dst); 4636 4637 sllx(src, 32+24, tmp); 4638 srlx(tmp, 32, tmp); 4639 or3(dst, tmp, dst); 4640 } 4641 4642 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4643 reverse_bytes_32(src, tmp1, tmp2); 4644 movxtod(tmp1, dst); 4645 } 4646 4647 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4648 movdtox(src, tmp1); 4649 reverse_bytes_32(tmp1, dst, tmp2); 4650 } 4651 4652 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4653 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4654 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4655 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4656 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4657 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4658 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4659 ldxl(buf, G0, xtmp_lo); 4660 inc(buf, 8); 4661 ldxl(buf, G0, xtmp_hi); 4662 inc(buf, 8); 4663 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4664 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4665 } 4666 4667 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4668 mov(xcrc_lo, xtmp_lo); 4669 mov(xcrc_hi, xtmp_hi); 4670 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4671 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4672 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4673 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4674 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4675 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4676 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4677 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4678 } 4679 4680 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4681 and3(xcrc, 0xFF, tmp); 4682 sllx(tmp, 2, tmp); 4683 lduw(table, tmp, xtmp); 4684 srlx(xcrc, 8, xcrc); 4685 xor3(xtmp, xcrc, xcrc); 4686 } 4687 4688 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4689 and3(crc, 0xFF, tmp); 4690 srlx(crc, 8, crc); 4691 sllx(tmp, 2, tmp); 4692 lduw(table, tmp, tmp); 4693 xor3(tmp, crc, crc); 4694 } 4695 4696 #define CRC32_TMP_REG_NUM 18 4697 4698 #define CRC32_CONST_64 0x163cd6124 4699 #define CRC32_CONST_96 0x0ccaa009e 4700 #define CRC32_CONST_160 0x1751997d0 4701 #define CRC32_CONST_480 0x1c6e41596 4702 #define CRC32_CONST_544 0x154442bd4 4703 4704 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4705 4706 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4707 Label L_main_loop_prologue; 4708 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4709 Label L_fold_tail, L_fold_tail_loop; 4710 Label L_8byte_fold_loop, L_8byte_fold_check; 4711 4712 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc); // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff, proceed directly to cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc); // Clear out the crc register
  // Main loop needs 128-bytes at least
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9], tmp[8], buf, 0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt,
L_fold_512b_loop); 4808 4809 // Fold 512 bits to 128 bits 4810 bind(L_fold_512b); 4811 set64(CRC32_CONST_96, const_96, tmp[8]); 4812 set64(CRC32_CONST_160, const_160, tmp[9]); 4813 4814 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]); 4815 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]); 4816 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]); 4817 dec(len, 48); 4818 4819 // Fold the rest of 128 bits data chunks 4820 bind(L_fold_tail); 4821 mov(32, G4); 4822 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b); 4823 4824 set64(CRC32_CONST_96, const_96, tmp[8]); 4825 set64(CRC32_CONST_160, const_160, tmp[9]); 4826 4827 bind(L_fold_tail_loop); 4828 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0); 4829 sub(len, 16, len); 4830 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop); 4831 4832 // Fold the 128 bits in tmps 0 - 1 into tmp 1 4833 bind(L_fold_128b); 4834 4835 set64(CRC32_CONST_64, const_64, tmp[4]); 4836 4837 xmulx(const_64, tmp[0], tmp[2]); 4838 xmulxhi(const_64, tmp[0], tmp[3]); 4839 4840 srl(tmp[2], G0, tmp[4]); 4841 xmulx(const_64, tmp[4], tmp[4]); 4842 4843 srlx(tmp[2], 32, tmp[2]); 4844 sllx(tmp[3], 32, tmp[3]); 4845 or3(tmp[2], tmp[3], tmp[2]); 4846 4847 xor3(tmp[4], tmp[1], tmp[4]); 4848 xor3(tmp[4], tmp[2], tmp[1]); 4849 dec(len, 8); 4850 4851 // Use table lookup for the 8 bytes left in tmp[1] 4852 dec(len, 8); 4853 4854 // 8 8-bit folds to compute 32-bit CRC. 4855 for (int j = 0; j < 4; j++) { 4856 fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]); 4857 } 4858 srl(tmp[1], G0, crc); // move 32 bits to general register 4859 for (int j = 0; j < 4; j++) { 4860 fold_8bit_crc32(crc, table, tmp[3]); 4861 } 4862 4863 bind(L_8byte_fold_check); 4864 4865 // Restore int registers saved in FP registers 4866 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) { 4867 movdtox(as_FloatRegister(2*i), tmp[i]); 4868 } 4869 4870 ba(L_cleanup_check); 4871 delayed()->nop(); 4872 4873 // Table look-up method for the remaining few bytes 4874 bind(L_cleanup_loop); 4875 ldub(buf, 0, O4); 4876 inc(buf); 4877 dec(len); 4878 xor3(O4, crc, O4); 4879 and3(O4, 0xFF, O4); 4880 sllx(O4, 2, O4); 4881 lduw(table, O4, O4); 4882 srlx(crc, 8, crc); 4883 xor3(O4, crc, crc); 4884 bind(L_cleanup_check); 4885 nop(); 4886 cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop); 4887 4888 not1(crc); 4889 } 4890 4891 #define CHUNK_LEN 128 /* 128 x 8B = 1KB */ 4892 #define CHUNK_K1 0x1307a0206 /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */ 4893 #define CHUNK_K2 0x1a0f717c4 /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */ 4894 #define CHUNK_K3 0x0170076fa /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */ 4895 4896 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) { 4897 4898 Label L_crc32c_head, L_crc32c_aligned; 4899 Label L_crc32c_parallel, L_crc32c_parallel_loop; 4900 Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop; 4901 Label L_crc32c_done, L_crc32c_tail, L_crc32c_return; 4902 4903 set(ExternalAddress(StubRoutines::crc32c_table_addr()), table); 4904 4905 cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return); 4906 4907 // clear upper 32 bits of crc 4908 clruwu(crc); 4909 4910 and3(buf, 7, G4); 4911 cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, 
  mov(8, G1);
  sub(G1, G4, G4); // G4 = head length, 8 - (buf & 7)

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of lower 32 bits to big endian, and move to FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  // last word of each chunk; the fourth chunk's word is loaded
  // little-endian below and folded in during combining
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // Combine the results of the 4 chunks. CRC is linear over GF(2): each
  // chunk's CRC is advanced past the bytes that follow it by a carry-less
  // multiply with the matching CHUNK_K constant, and the products are
  // XOR-ed together.
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via stack, and move to FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1); // align the scratch slot down to an 8-byte boundary
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
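
  // ------ serial path ------
  // Once fewer than 4KB remain, the rest of the aligned bytes are
  // consumed on a single crc32c dependency chain: first a 4x-unrolled
  // 32-byte loop, then an 8-byte loop. The unrolling only amortizes
  // loop overhead; unlike the parallel section above, each crc32c here
  // must wait for the previous one. Roughly:
  //
  //   while (len >= 32) { 4 x { F0 = crc32c(F0, next8bytes); } len -= 32; }
  //   while (len >= 8)  {       F0 = crc32c(F0, next8bytes);   len -= 8;  }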
  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
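
// For reference, both kernels compute standard bit-reflected CRCs:
// kernel_crc32 uses the zlib polynomial (reversed form 0xEDB88320) and
// kernel_crc32c the Castagnoli polynomial (reversed form 0x82F63B78).
// A minimal bitwise model of the CRC32C update (sketch only; note that
// this kernel, unlike kernel_crc32 with its not1() pair, leaves the
// ~crc pre/post-conditioning to its caller):
//
//   uint32_t crc32c_update(uint32_t crc, const uint8_t* p, size_t n) {
//     while (n--) {
//       crc ^= *p++;
//       for (int k = 0; k < 8; k++)
//         crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
//     }
//     return crc;  // caller applies ~ on entry/exit as required
//   }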