/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}
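// Illustrative note (not from the original sources): address_in_frame()
// below maps an argument number to its home slot in the memory-parameter
// area of a frame.  Assuming, for example, n_register_parameters == 6,
// memory_parameter_word_sp_offset == 23 and BytesPerWord == 8, the 7th
// argument (_number == 6) resolves to disp = (6 - 6 + 23) * 8 + STACK_BIAS,
// i.e. the first memory-parameter word of the caller's (FP-relative, "in")
// or callee's (SP-relative, "out") frame.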
Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx, only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp);  // In argument.
  else
    return Address(SP, disp);  // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
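// Illustrative note (not from the original sources): the mask/insert idiom
// above works because wdisp(word_aligned_ones, 0, nbits) encodes the
// all-ones word-aligned displacement, i.e. exactly the bits of the
// displacement field.  For a 19-bit branch, for example:
//
//   m = wdisp(-4, 0, 19);               // 0x0007ffff: every displacement bit
//   v = wdisp(dest_pos, inst_pos, 19);  // the new displacement
//   patched = (inst & ~m) | v;          // old displacement out, new one in
//
// so the opcode, condition and annul bits are preserved untouched.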
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page.  This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  } else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
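// Illustrative note (not from the original sources): the arithmetic above
// hashes the thread pointer into a word offset inside the serialization
// page.  Assuming, for example, a page size of 8192 and a shift count of 4:
//
//   tmp2 = thread >> 4;                  // drop low alignment bits
//   tmp2 = tmp2 & (8192 - sizeof(int));  // wrap into the page, int-aligned
//   st(G0, page_base, tmp2);             // distinct threads hit distinct words
//
// so concurrent writers rarely share a cache line.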
void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
    // Armed page has poll bit set.
    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
    br_notnull(temp_reg, a, Assembler::pn, slow_path);
  } else {
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    load_contents(sync_state, temp_reg);
    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
    br(Assembler::notEqual, a, Assembler::pn, slow_path);
  }
}

void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}
void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);  // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible,
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}
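// Illustrative note (not from the original sources): all call_VM variants
// below funnel into call_VM_base and share one calling convention --
// G2_thread is passed to the C entry point in O0 (so O0 is never a Java
// argument register here), Java-visible arguments go in O1..O3, and the
// frame anchor is published around the call, roughly:
//
//   set_last_Java_frame(SP, noreg);  // make the stack walkable
//   call(entry_point, ...);          // C entry receives (thread, args...)
//   reset_last_Java_frame();         // back to normal running state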
void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0);  // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}
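// Illustrative usage sketch (hypothetical call site, not from this file):
// a stub that needs the VM to raise an exception might emit, through an
// assembler pointer __, something like
//
//   __ call_VM(noreg,
//              CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException),
//              Otos_i);  // failing object becomes O1; the thread rides in O0
//
// relying on call_VM_base above to publish the frame anchor and to forward
// any pending exception on return.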
// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);  // Propagate Lmethod
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
  srlx(obj, CardTable::card_shift, obj);
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}
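// Illustrative note (not from the original sources): internal_sethi() below
// builds an arbitrary 64-bit constant out of sethi/or3/sllx pieces.  For a
// value with msb32 = 0x00000012 and lsb32 = 0x3456789a, the general path
// would emit roughly:
//
//   sethi %hi(0x12), d     // top 22 bits of the high word (here zero)
//   or    d, 0x012, d      // low 10 bits of the high word
//   sllx  d, 12, d
//   or    d, 0x345, d      // bits 31..20 of the low word
//   sllx  d, 10, d
//   or    d, 0x19e, d      // bits 19..10 of the low word
//   sllx  d, 10, d         // leave the low10 field zero for a later add
//
// which is why the patchable form pads to a fixed seven instructions.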
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  } else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  } else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      } else {
        shiftcnt = 12;
      }
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      } else {
        shiftcnt = 10;
      }
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    } else {
      sllx(d, 32, d);
    }
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d);  // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
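// Illustrative note (not from the original sources): set64() below picks
// the cheapest sequence by the shape of the constant, for example:
//
//   set64(42, d, tmp)                   -> or3(G0, 42, d)      // simm13
//   set64(0x12345000LL, d, tmp)         -> sethi only          // hi == 0, low10 == 0
//   set64(0xFFFFFFFF87654321LL, d, tmp) -> sethi(~lo) + xor3   // hi == -1
//
// Only the fully general case needs the scratch register tmp, which holds
// the high word while the low word is built in d.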
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);          // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  } else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  } else if (hi == -1) {
    Assembler::sethi(~lo, d);         // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  } else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);        // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  } else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);          // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  } else if (hi == -1) {
    count += 2;
  } else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  } else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1)  ++nWords;  // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
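// Illustrative note (not from the original sources): a plain 'save' rotates
// the register window, so an in/local value of the current frame is not
// directly visible under the same name afterwards.  save_frame_and_mov()
// below therefore parks such values in their window-save slots first and
// reloads them (or renames them via after_save()) once the new frame exists.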
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
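// Illustrative note (not from the original sources): set_narrow_oop above
// and set_narrow_klass below share the same two-instruction pattern,
//
//   sethi %hi(imm), d      // upper 22 bits of the 32-bit narrow value
//   add   d, lo10(imm), d  // low 10 bits
//
// with a single relocation on the sethi; the patching code rewrites both
// instructions together, which is why no separate relocation is added for
// the 'add' (see relocInfo_sparc.hpp).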
void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = ");     os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = ");     os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}
void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;  // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0);  // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0);  // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags);  // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl();  // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail);  // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags);  // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags);  // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();  // make stop actually stop rather than writing
                      // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();  // one more save to free up another O7 register
  mov(I0, O1);   // addr of reg save area

  // We expect pointer to message in I1.  Caller must set it up in O1
  mov(I1, O0);   // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore();  // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get the locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  } else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}
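// Illustrative note (not from the original sources): the helpers below size
// interpreter call frames.  Assuming n_register_parameters == 6, a call
// with 9 parameter words leaves 9 - 6 = 3 to be passed in memory, so
// calc_mem_param_words yields 3; with only 4 parameters the subtraction
// goes negative and the annulled delay-slot set() clamps the result to 0.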
void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult);  // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras);  // if neg, clear reg
  delayed()->set(0, Rresult);         // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}
// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}

// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}
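// Illustrative note (not from the original sources): the *_short helpers
// above collapse the usual three-instruction idiom
//
//   cmp %o0, %o1;  br<cond> L;  nop   // delay slot wasted on a nop
//
// into a single compare-and-branch (cbcond) when the label is near enough
// for cbcond's short 10-bit word displacement (use_cbcond(L)) and, for the
// immediate forms, when the constant fits cbcond's 5-bit immediate field --
// hence the is_simm(simm13a, 5) guards.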
// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);  // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low);  // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(-1, Rresult);
  movcc(equal,           false, icc, 0, Rresult);
  movcc(greaterUnsigned, false, icc, 1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3(Rcount, 0x3f, Rcount);  // take least significant 6 bits
  subcc(Rcount, 31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits);  // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low);      // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low);      // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);        // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high);  // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}
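// Illustrative note (not from the original sources): the "31-count, then 1"
// split above (and in lshr/lushr below) exists because SPARC 32-bit shifts
// use only the low 5 bits of the count, so a single shift by 32 - count
// breaks when count == 0 (it would shift by 0, not by 32).  For example,
// with count == 12 the transfer bits are Rin_low >> 19 followed by >> 1,
// i.e. the top 12 bits of the low word, which are then OR'ed into the new
// high word; with count == 0 the two shifts (>> 31, >> 1) correctly leave
// zero transfer bits.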
  neg(Ralt_count);
  if (Rcount != Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
  sra(Rin_high, Rcount, Rout_high);      // high half
  sll(Rxfer_bits, 1, Rxfer_bits);        // shift left by one more
  if (Rcount == Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }
  ba(done);
  delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);

  sra(Rin_high, Ralt_count, Rout_low);
  sra(Rin_high, 31, Rout_high); // sign into hi

  bind( done );
}



void MacroAssembler::lushr( Register Rin_high,  Register Rin_low,
                            Register Rcount,
                            Register Rout_high, Register Rout_low,
                            Register Rtemp ) {

  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert( Ralt_count != Rin_high
      &&  Ralt_count != Rin_low
      &&  Ralt_count != Rcount
      &&  Rxfer_bits != Rin_low
      &&  Rxfer_bits != Rin_high
      &&  Rxfer_bits != Rcount
      &&  Rxfer_bits != Rout_high
      &&  Rout_high  != Rin_low,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3( Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount,   31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting left by 32-count the high
  // register. This is done by shifting left by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);
  if (Rcount != Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
  srl(Rin_high, Rcount, Rout_high);      // high half
  sll(Rxfer_bits, 1, Rxfer_bits);        // shift left by one more
  if (Rcount == Rout_low) {
    srl(Rin_low, Rcount, Rout_low);
  }
  ba(done);
  delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);

  srl(Rin_high, Ralt_count, Rout_low);
  clr(Rout_high);

  bind( done );
}

void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
  cmp(Ra, Rb);
  mov(-1, Rresult);
  movcc(equal,   false, xcc, 0, Rresult);
  movcc(greater, false, xcc, 1, Rresult);
}


void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case  8:  ld_long(src, dst); break;
  case  4:  ld(     src, dst); break;
  case  2:  is_signed ? ldsh(src, dst) : lduh(src, dst); break;
  case  1:  is_signed ? ldsb(src, dst) : ldub(src, dst); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
  case  8:  st_long(src, dst); break;
  case  4:  st(     src, dst); break;
  case  2:  sth(    src, dst); break;
  case  1:  stb(    src, dst); break;
  default:  ShouldNotReachHere();
  }
}


void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                FloatRegister Fa, FloatRegister Fb,
                                Register Rresult) {
  if (is_float) {
    fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
  } else {
    fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
  }

  if (unordered_result == 1) {
    mov(                                    -1, Rresult);
    movcc(f_equal,              true, fcc0,  0, Rresult);
    movcc(f_unorderedOrGreater, true, fcc0,  1, Rresult);
  } else {
    mov(                                    -1, Rresult);
    movcc(f_equal,              true, fcc0,  0, Rresult);
    movcc(f_greater,            true, fcc0,  1, Rresult);
  }
}


void MacroAssembler::save_all_globals_into_locals() {
  mov(G1,L1);
  mov(G2,L2);
  mov(G3,L3);
  mov(G4,L4);
  mov(G5,L5);
  mov(G6,L6);
  mov(G7,L7);
}

void MacroAssembler::restore_globals_from_locals() {
  mov(L1,G1);
  mov(L2,G2);
  mov(L3,G3);
  mov(L4,G4);
  mov(L5,G5);
  mov(L6,G6);
  mov(L7,G7);
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  AddressLiteral a(delayed_value_addr);
  load_ptr_contents(a, tmp);

#ifdef ASSERT
  tst(tmp);
  breakpoint_trap(zero, xcc);
#endif

  if (offset != 0)
    add(tmp, offset, tmp);

  return RegisterOrConstant(tmp);
}


RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  assert(d.register_or_noreg() != G0, "lost side effect");
  if ((s2.is_constant() && s2.as_constant() == 0) ||
      (s2.is_register() && s2.as_register() == G0)) {
    // Do nothing, just move value.
    if (s1.is_register()) {
      if (d.is_constant())  d = temp;
      mov(s1.as_register(), d.as_register());
      return d;
    } else {
      return s1;
    }
  }

  if (s1.is_register()) {
    assert_different_registers(s1.as_register(), temp);
    if (d.is_constant())  d = temp;
    andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
    return d;
  } else {
    if (s2.is_register()) {
      assert_different_registers(s2.as_register(), temp);
      if (d.is_constant())  d = temp;
      set(s1.as_constant(), temp);
      andn(temp, s2.as_register(), d.as_register());
      return d;
    } else {
      intptr_t res = s1.as_constant() & ~s2.as_constant();
      return res;
    }
  }
}
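// These regcon_* helpers accept and return RegisterOrConstant operands,
// folding the operation at assembly time whenever both inputs are constants.
// Illustratively (an assumed usage, not taken from callers in this file):
//   regcon_inc_ptr(RegisterOrConstant(8), RegisterOrConstant(16), d, temp)
// returns the constant 24 and emits no instructions at all.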
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  assert(d.register_or_noreg() != G0, "lost side effect");
  if ((s2.is_constant() && s2.as_constant() == 0) ||
      (s2.is_register() && s2.as_register() == G0)) {
    // Do nothing, just move value.
    if (s1.is_register()) {
      if (d.is_constant())  d = temp;
      mov(s1.as_register(), d.as_register());
      return d;
    } else {
      return s1;
    }
  }

  if (s1.is_register()) {
    assert_different_registers(s1.as_register(), temp);
    if (d.is_constant())  d = temp;
    add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
    return d;
  } else {
    if (s2.is_register()) {
      assert_different_registers(s2.as_register(), temp);
      if (d.is_constant())  d = temp;
      add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
      return d;
    } else {
      intptr_t res = s1.as_constant() + s2.as_constant();
      return res;
    }
  }
}

RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
  assert(d.register_or_noreg() != G0, "lost side effect");
  if (!is_simm13(s2.constant_or_zero()))
    s2 = (s2.as_constant() & 0xFF);
  if ((s2.is_constant() && s2.as_constant() == 0) ||
      (s2.is_register() && s2.as_register() == G0)) {
    // Do nothing, just move value.
    if (s1.is_register()) {
      if (d.is_constant())  d = temp;
      mov(s1.as_register(), d.as_register());
      return d;
    } else {
      return s1;
    }
  }

  if (s1.is_register()) {
    assert_different_registers(s1.as_register(), temp);
    if (d.is_constant())  d = temp;
    sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
    return d;
  } else {
    if (s2.is_register()) {
      assert_different_registers(s2.as_register(), temp);
      if (d.is_constant())  d = temp;
      set(s1.as_constant(), temp);
      sll_ptr(temp, s2.as_register(), d.as_register());
      return d;
    } else {
      intptr_t res = s1.as_constant() << s2.as_constant();
      return res;
    }
  }
}


// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Register sethi_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  Label L_no_such_interface_restore;
  bool did_save = false;
  if (scan_temp == noreg || sethi_temp == noreg) {
    Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
    Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
    assert(method_result->is_global(), "must be able to return value");
    scan_temp  = L2;
    sethi_temp = L3;
    save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
    recv_klass = recv_2;
    intf_klass = intf_2;
    did_save = true;
  }

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();

  lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp);
  // %%% We should store the aligned, prescaled offset in the klassoop.
  // Then the next several instructions would fold away.

  int itb_offset = vtable_base;
  int itb_scale = exact_log2(vtableEntry::size_in_bytes());
  sll(scan_temp, itb_scale,  scan_temp);
  add(scan_temp, itb_offset, scan_temp);
  add(recv_klass, scan_temp, scan_temp);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    RegisterOrConstant itable_offset = itable_index;
    itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
    itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
    add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label L_search, L_found_method;

  for (int peel = 1; peel >= 0; peel--) {
    // %%%% Could load both offset and interface in one ldx, if they were
    // in the opposite order.  This would save a load.
    ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);

    // Check that this entry is non-null.  A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
    delayed()->cmp(method_result, intf_klass);

    if (peel) {
      brx(Assembler::equal,    false, Assembler::pt, L_found_method);
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, L_search);
      // (invert the test to fall through to found_method...)
    }
    delayed()->add(scan_temp, scan_step, scan_temp);

    if (!peel)  break;

    bind(L_search);
  }

  bind(L_found_method);
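  // At this point scan_temp has been post-incremented one entry past the
  // matching itableOffsetEntry.  A rough C sketch of the hit code below
  // (illustrative only):
  //   int offset    = ((itableOffsetEntry*)(scan_temp - scan_step))->offset();
  //   method_result = *(Method**)(recv_klass + offset);
  // where recv_klass was already biased by the scaled itable_index above.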
  if (return_method) {
    // Got a hit.
    int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
    // scan_temp[-scan_step] points to the vtable offset we need
    ito_offset -= scan_step;
    lduw(scan_temp, ito_offset, scan_temp);
    ld_ptr(recv_klass, scan_temp, method_result);
  }

  if (did_save) {
    Label L_done;
    ba(L_done);
    delayed()->restore();

    bind(L_no_such_interface_restore);
    ba(L_no_such_interface);
    delayed()->restore();

    bind(L_done);
  }
}


// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
  Register sethi_temp = method_result;
  const int base = in_bytes(Klass::vtable_start_offset()) +
                   // method pointer offset within the vtable entry:
                   vtableEntry::method_offset_in_bytes();
  RegisterOrConstant vtable_offset = vtable_index;
  // Each of the following three lines potentially generates an instruction.
  // But the total number of address formation instructions will always be
  // at most two, and will often be zero.  In any case, it will be optimal.
  // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
  // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
  vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset);
  vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
  Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
  ld_ptr(vtable_entry_addr, method_result);
}


void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Register temp2_reg,
                                         Label& L_success) {
  Register sub_2 = sub_klass;
  Register sup_2 = super_klass;
  if (!sub_2->is_global())  sub_2 = L0;
  if (!sup_2->is_global())  sup_2 = L1;
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg = L2;
    temp2_reg = L3;
    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
    sub_klass = sub_2;
    super_klass = sup_2;
    did_save = true;
  }
  Label L_failure, L_pop_to_failure, L_pop_to_success;
  check_klass_subtype_fast_path(sub_klass, super_klass,
                                temp_reg, temp2_reg,
                                (did_save ? &L_pop_to_success : &L_success),
                                (did_save ? &L_pop_to_failure : &L_failure), NULL);

  if (!did_save)
    save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
  check_klass_subtype_slow_path(sub_2, sup_2,
                                L2, L3, L4, L5,
                                NULL, &L_pop_to_failure);

  // on success:
  bind(L_pop_to_success);
  restore();
  ba_short(L_success);

  // on failure:
  bind(L_pop_to_failure);
  restore();
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());

  bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  bool need_slow_path = (must_load_sco ||
                         super_check_offset.constant_or_zero() == sco_offset);

  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass, temp_reg,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp2_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1 ||
         (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
         "at most one NULL in the batch, usually");

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(super_klass, sub_klass);
  brx(Assembler::equal, false, Assembler::pn, *L_success);
  delayed()->nop();

  // Check the supertype display:
  if (must_load_sco) {
    // The super check offset is always positive...
    lduw(super_klass, sco_offset, temp2_reg);
    super_check_offset = RegisterOrConstant(temp2_reg);
    // super_check_offset is register.
    assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
  }
  ld_ptr(sub_klass, super_check_offset, temp_reg);
  cmp(super_klass, temp_reg);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).
  // Hacked ba(), which may only be used just before L_fallthrough.
#define FINAL_JUMP(label)            \
  if (&(label) != &L_fallthrough) {  \
    ba(label);  delayed()->nop();    \
  }

  if (super_check_offset.is_register()) {
    brx(Assembler::equal, false, Assembler::pn, *L_success);
    delayed()->cmp(super_check_offset.as_register(), sc_offset);

    if (L_failure == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
      delayed()->nop();
      FINAL_JUMP(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_success);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
      delayed()->nop();
      FINAL_JUMP(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      brx(Assembler::equal, false, Assembler::pt, *L_success);
      delayed()->nop();
    } else {
      brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
      delayed()->nop();
      FINAL_JUMP(*L_success);
    }
  }

  bind(L_fallthrough);

#undef FINAL_JUMP
}


void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register count_temp,
                                                   Register scan_temp,
                                                   Register scratch_reg,
                                                   Register coop_reg,
                                                   Label* L_success,
                                                   Label* L_failure) {
  assert_different_registers(sub_klass, super_klass,
                             count_temp, scan_temp, scratch_reg, coop_reg);

  Label L_fallthrough, L_loop;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.

#ifndef PRODUCT
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  inc_counter((address) pst_counter, count_temp, scan_temp);
#endif

  // We will consult the secondary-super array.
  ld_ptr(sub_klass, ss_offset, scan_temp);

  Register search_key = super_klass;

  // Load the array length.  (The length is a positive 32-bit value,
  // so lduw does the right thing on LP64.)
  lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);

  // Check for empty secondary super list
  tst(count_temp);

  // Elements in the array of super classes are pointer-sized.
  int element_size = wordSize;

  // Top of search loop
  bind(L_loop);
  br(Assembler::equal, false, Assembler::pn, *L_failure);
  delayed()->add(scan_temp, element_size, scan_temp);

  // Skip the array header in all array accesses.
  int elem_offset = Array<Klass*>::base_offset_in_bytes();
  elem_offset -= element_size;   // the scan pointer was pre-incremented also

  // Load next super to check
  ld_ptr( scan_temp, elem_offset, scratch_reg );

  // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
  cmp(scratch_reg, search_key);

  // A miss means we are NOT a subtype and need to keep looping
  brx(Assembler::notEqual, false, Assembler::pn, L_loop);
  delayed()->deccc(count_temp); // decrement trip counter in delay slot

  // Success.  Cache the super we found and proceed in triumph.
  st_ptr(super_klass, sub_klass, sc_offset);

  if (L_success != &L_fallthrough) {
    ba(*L_success);
    delayed()->nop();
  }

  bind(L_fallthrough);
}
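// The slow path above is essentially this C++ loop (an illustrative sketch):
//   Array<Klass*>* ss = sub_klass->secondary_supers();
//   for (int i = 0; i < ss->length(); i++) {
//     if (ss->at(i) == super_klass) {
//       sub_klass->set_secondary_super_cache(super_klass);  // speed up next time
//       goto success;
//     }
//   }
//   goto failure;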
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                   Register temp_reg,
                                                   int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = extra_slot_offset * stackElementSize;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return offset;
  } else {
    assert(temp_reg != noreg, "must specify");
    sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
    if (offset != 0)
      add(temp_reg, offset, temp_reg);
    return temp_reg;
  }
}


Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int extra_slot_offset) {
  return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
}


void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
                                          Register temp_reg,
                                          Label& done, Label* slow_case,
                                          BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");

  if (PrintBiasedLockingStatistics) {
    assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
    if (counters == NULL)
      counters = BiasedLocking::counters();
  }

  Label cas_label;

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
  cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);

  load_klass(obj_reg, temp_reg);
  ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  or3(G2_thread, temp_reg, temp_reg);
  xor3(mark_reg, temp_reg, temp_reg);
  andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
    // Reload mark_reg as we may need it later
    ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
  }
  brx(Assembler::equal, true, Assembler::pt, done);
  delayed()->nop();
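  // The xor/andcc above compared the mark word against
  // (klass->prototype_header() | thread), ignoring the age bits.  In effect
  // (an illustrative sketch of the triage below):
  //   diff = (mark ^ (proto | self)) & ~age_mask;
  //   if (diff == 0)                 goto done;             // biased to us
  //   if (diff & biased_lock_mask)   goto try_revoke_bias;  // bias disabled
  //   if (diff & epoch_mask)         goto try_rebias;       // stale epoch
  //   /* otherwise the bias owner differs; try to CAS-acquire the bias */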
  Label try_revoke_bias;
  Label try_rebias;
  Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
  brx(Assembler::notZero, false, Assembler::pn, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go into the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  delayed()->and3(mark_reg,
                  markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
                  mark_reg);
  or3(G2_thread, mark_reg, temp_reg);
  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  cmp(mark_reg, temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
  }
  if (slow_case != NULL) {
    brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
    delayed()->nop();
  }
  ba_short(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
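  // The rebias attempt below boils down to (an illustrative sketch):
  //   CAS(&obj->mark, old_mark, klass->prototype_header() | self);
  // i.e. the bias is transferred directly to this thread; if the CAS
  // loses a race, the slow path revokes the bias instead.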
  load_klass(obj_reg, temp_reg);
  ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  or3(G2_thread, temp_reg, temp_reg);
  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  cmp(mark_reg, temp_reg);
  if (counters != NULL) {
    cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
  }
  if (slow_case != NULL) {
    brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
    delayed()->nop();
  }
  ba_short(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_klass(obj_reg, temp_reg);
  ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cmp(mark_reg, temp_reg);
    cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
  }

  bind(cas_label);
}

void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
                                          bool allow_delay_slot_filling) {
  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ld_ptr(mark_addr, temp_reg);
  and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
  delayed();
  if (!allow_delay_slot_filling) {
    nop();
  }
}


// compiler_lock_object() and compiler_unlock_object() are direct transliterations
// of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
// The code could be tightened up considerably.
//
// box->dhw disposition - post-conditions at DONE_LABEL.
// -   Successful inflated lock:  box->dhw != 0.
//     Any non-zero value suffices.
//     Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
// -   Successful Stack-lock: box->dhw == mark.
//     box->dhw must contain the displaced mark word value
// -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
//     The slow-path fast_enter() and slow_enter() operators
//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
// -   Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - is
// extremely sensitive to the size of the code emitted by compiler_lock_object
// and compiler_unlock_object.  Critically, the key factor is code size, not path
// length.  (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
// effect).
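// In outline, the fast lock path below is (an illustrative sketch, not the
// literal emitted sequence):
//   mark = obj->mark();
//   if (mark is inflated)      success = CAS(&monitor->owner, NULL, self);
//   else {                     // neutral or stack-locked
//     success = CAS(&obj->mark, mark | unlocked, box);   // try stack-lock
//     if (!success)  success = recursive-stack-lock test on the old mark;
//   }
//   icc.zf == 1 at DONE_LABEL iff the lock was acquired.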
void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
                                          Register Rbox, Register Rscratch,
                                          BiasedLockingCounters* counters,
                                          bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  verify_oop(Roop);
  Label done ;

  if (counters != NULL) {
    inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
  }

  if (EmitSync & 1) {
    mov(3, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    cmp(SP, G0);
    return;
  }

  if (EmitSync & 2) {

    // Fetch object's markword
    ld_ptr(mark_addr, Rmark);

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
    }

    // Save Rbox in Rscratch to be used for the cas operation
    mov(Rbox, Rscratch);

    // set Rmark to markOop | markOopDesc::unlocked_value
    or3(Rmark, markOopDesc::unlocked_value, Rmark);

    // Initialize the box.  (Must happen before we update the object mark!)
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);

    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    // hence we are done
    cmp(Rmark, Rscratch);
    sub(Rscratch, STACK_BIAS, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot

    // we did not find an unlocked object so see if this is a recursive case
    // sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    bind (done);
    return;
  }
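  // Aside on the recursion test used above and below (an illustrative
  // reading): after a failed CAS, Rscratch holds the mark found in the
  // object.  If
  //   ((mark - STACK_BIAS) - SP) & 0xfffff003 == 0
  // then the mark is a word-aligned address within our own page-sized
  // stack window, i.e. the lock is already held by this frame, and the
  // zero stored into box->dhw encodes a recursive stack-lock.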
  Label Egress ;

  if (EmitSync & 256) {
    Label IsInflated ;

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated
    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }

    // Store mark into displaced mark field in the on-stack basic-lock "box"
    // Critically, this must happen before the CAS
    // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->

    // Try stack-lock acquisition.
    // Beware: the 1st instruction is in a delay slot
    mov(Rbox,  Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    cmp(Rmark, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);

    // Stack-lock attempt failed - check for recursive stack-lock.
    // See the comments below about how we might remove this case.
    sub(Rscratch, STACK_BIAS, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());

    bind(IsInflated);
    if (EmitSync & 64) {
      // If m->owner != null goto IsLocked
      // Pessimistic form: Test-and-CAS vs CAS
      // The optimistic form avoids RTS->RTO cache line upgrades.
      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::notZero, false, Assembler::pn, done);
      delayed()->nop();
      // m->owner == null : it's unlocked.
    }

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // Intentional fall-through into done
  } else {
    // Aggressively avoid the Store-before-CAS penalty
    // Defer the store into box->dhw until after the CAS
    Label IsInflated, Recursive ;

    // Anticipate CAS -- Avoid RTS->RTO upgrade
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->                         // Beware - dangling delay-slot

    // Try stack-lock acquisition.
    // Transiently install BUSY (0) encoding in the mark word.
    // if the CAS of 0 into the mark was successful then we execute:
    //   ST box->dhw  = mark   -- save fetched mark in on-stack basiclock box
    //   ST obj->mark = box    -- overwrite transient 0 value
    // This presumes TSO, of course.

    mov(0, Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
    cmp(Rscratch, Rmark);
    brx(Assembler::notZero, false, Assembler::pn, Recursive);
    delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    if (counters != NULL) {
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
    }
    ba(done);
    delayed()->st_ptr(Rbox, mark_addr);

    bind(Recursive);
    // Stack-lock attempt failed - check for recursive stack-lock.
    // Tests show that we can remove the recursive case with no impact
    // on refworkload 0.83.  If we need to reduce the size of the code
    // emitted by compiler_lock_object() the recursive case is a perfect
    // candidate.
    //
    // A more extreme idea is to always inflate on stack-lock recursion.
    // This lets us eliminate the recursive checks in compiler_lock_object
    // and compiler_unlock_object and the (box->dhw == 0) encoding.
    // A brief experiment - requiring changes to synchronizer.cpp and the
    // interpreter - showed a performance *increase*.  In the same experiment
    // I eliminated the fast-path stack-lock code from the interpreter and
    // always passed control to the "slow" operators in synchronizer.cpp.

    // Rscratch contains the fetched obj->mark value from the failed CAS.
    sub(Rscratch, STACK_BIAS, Rscratch);
    sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    if (counters != NULL) {
      // Accounting needs the Rscratch register
      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
      ba_short(done);
    } else {
      ba(done);
      delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    }

    bind (IsInflated);

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    andcc(Rscratch, Rscratch, G0);      // set ICCs for done: icc.zf iff success
    // set icc.zf : 1=success 0=failure
    // ST box->displaced_header = NonZero.
    // Any non-zero value suffices:
    //   markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
    st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
    // Intentional fall-through into done
  }

  bind (done);
}

void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
                                            Register Rbox, Register Rscratch,
                                            bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  Label done ;

  if (EmitSync & 4) {
    cmp(SP, G0);
    return;
  }

  if (EmitSync & 8) {
    if (try_bias) {
      biased_locking_exit(mark_addr, Rscratch, done);
    }

    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    br_null_short(Rmark, Assembler::pt, done);

    // Check if it is still a lightweight lock; this is true if we see
    // the stack address of the basicLock in the markOop of the object
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rbox, Rmark);
    ba(done);
    delayed()->cmp(Rbox, Rmark);
    bind(done);
    return;
  }

  // Beware ... If the aggregate size of the code emitted by CLO and CUO is
  // too large performance rolls abruptly off a cliff.
  // This could be related to inlining policies, code cache management, or
  // I$ effects.
  Label LStacked ;

  if (try_bias) {
    // TODO: eliminate redundant LDs of obj->mark
    biased_locking_exit(mark_addr, Rscratch, done);
  }

  ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  andcc(Rscratch, Rscratch, G0);
  brx(Assembler::zero, false, Assembler::pn, done);
  delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
  andcc(Rmark, 2, G0);
  brx(Assembler::zero, false, Assembler::pt, LStacked);
  delayed()->nop();

  // It's inflated
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into _owner which releases the lock.  This prevents loads
  // and stores within the critical section from reordering (floating)
  // past the store that releases the lock.  But TSO is a strong memory model
  // and that particular flavor of barrier is a noop, so we can safely elide it.
  // Note that we use 1-0 locking by default for the inflated case.  We
  // close the resultant (and rare) race by having contended threads in
  // monitorenter periodically poll _owner.

  if (EmitSync & 1024) {
    // Emit code to check that _owner == Self
    // We could fold the _owner test into subsequent code more efficiently
    // than using a stand-alone check, but since _owner checking is off by
    // default we don't bother. We also might consider predicating the
    // _owner==Self check on Xcheck:jni or running on a debug build.
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch);
    orcc(Rscratch, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
  }

  if (EmitSync & 512) {
    // classic lock release code absent 1-0 locking
    //   m->Owner = null;
    //   membar #storeload
    //   if (m->cxq|m->EntryList) == null goto Success
    //   if (m->succ != null) goto Success
    //   if CAS (&m->Owner,0,Self) != 0 goto Success
    //   goto SlowPath
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
    st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    if (os::is_MP()) { membar(StoreLoad); }
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    brx(Assembler::zero, false, Assembler::pt, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  } else {
    // 1-0 form : avoids CAS and MEMBAR in the common case
    // Do not bother to ratify that m->Owner == Self.
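    // In outline (an illustrative sketch of the code below):
    //   if (m->recursions != 0)            goto slow;     // leave to runtime
    //   if ((m->cxq | m->EntryList) == 0)  { m->owner = 0; goto success; }
    //   m->owner = 0;  membar #storeload;
    //   if (m->succ != 0)                  goto success;  // a successor exists
    //   if (CAS(&m->owner, 0, self) != 0)  goto success;  // someone else took over
    //   goto slow;   // we reacquired the lock and must exit via the runtime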
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    if (EmitSync & 16384) {
      // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
      // we should transfer control directly to the slow-path.
      // This test makes the reacquire operation below very infrequent.
      // The logic is equivalent to :
      //   if (cxq|EntryList) == null : Owner=null; goto Success
      //   if succ == null : goto SlowPath
      //   Owner=null; membar #storeload
      //   if succ != null : goto Success
      //   if CAS(&Owner,null,Self) != null goto Success
      //   goto SlowPath
      brx(Assembler::zero, true, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
      ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->orcc(G0, 1, G0);
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    } else {
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    }
    if (os::is_MP()) { membar(StoreLoad); }
    // Check that _succ is (or remains) non-zero
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    // A slightly better v8+/v9 idiom would be the following:
    //   movrnz Rscratch,1,Rscratch
    //   ba done
    //   xorcc Rscratch,1,G0
    // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  }

  bind (LStacked);
  // Consider: we could replace the expensive CAS in the exit
  // path with a simple ST of the displaced mark value fetched from
  // the on-stack basiclock box.  That admits a race where a thread T2
  // in the slow lock path -- inflating with monitor M -- could race a
  // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  // More precisely T1 in the stack-lock unlock path could "stomp" the
  // inflated mark value M installed by T2, resulting in an orphan
  // object monitor M and T2 becoming stranded.  We can remedy that situation
  // by having T2 periodically poll the object's mark word using timed wait
  // operations.  If T2 discovers that a stomp has occurred it vacates
  // the monitor M and wakes any other threads stranded on the now-orphan M.
  // In addition the monitor scavenger, which performs deflation,
  // would also need to check for orphan monitors and stranded threads.
  //
  // Finally, inflation is also used when T2 needs to assign a hashCode
  // to O and O is stack-locked by T1.  The "stomp" race could cause
  // an assigned hashCode value to be lost.  We can avoid that condition
  // and provide the necessary hashCode stability invariants by ensuring
  // that hashCode generation is idempotent between copying GCs.
  // For example we could compute the hashCode of an object O as
  // O's heap address XOR some high quality RNG value that is refreshed
  // at GC-time.  The monitor scavenger would install the hashCode
  // found in any orphan monitors.  Again, the mechanism admits a
  // lost-update "stomp" WAW race but detects and recovers as needed.
  //
  // A prototype implementation showed excellent results, although
  // the scavenger and timeout code was rather involved.

  cas_ptr(mark_addr.base(), Rbox, Rscratch);
  cmp(Rbox, Rscratch);
  // Intentional fall through into done ...

  bind(done);
}
void MacroAssembler::print_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  // %%%%% need to implement this
}

void MacroAssembler::push_IU_state() {
  // %%%%% need to implement this
}


void MacroAssembler::pop_IU_state() {
  // %%%%% need to implement this
}


void MacroAssembler::push_FPU_state() {
  // %%%%% need to implement this
}


void MacroAssembler::pop_FPU_state() {
  // %%%%% need to implement this
}


void MacroAssembler::push_CPU_state() {
  // %%%%% need to implement this
}


void MacroAssembler::pop_CPU_state() {
  // %%%%% need to implement this
}



void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, next2, ok;
    Register t1 = L0;
    Register t2 = L1;
    Register t3 = L2;

    save_frame(0);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    or3(t1, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
    or3(t3, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(next2);
    and3(t3, MinObjAlignmentInBytesMask, t3);
    cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
    STOP("assert(aligned)");
    should_not_reach_here();

    bind(ok);
    restore();
  }
#endif
}
void MacroAssembler::eden_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if   known at compile time
  Register t1,                       // temp register
  Register t2,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
){
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1, t2);
  assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  } else {
    // get eden boundaries
    // note: we need both top & top_addr!
    const Register top_addr = t1;
    const Register end      = t2;

    CollectedHeap* ch = Universe::heap();
    set((intx)ch->top_addr(), top_addr);
    intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
    ld_ptr(top_addr, delta, end);
    ld_ptr(top_addr, 0, obj);

    // try to allocate
    Label retry;
    bind(retry);
#ifdef ASSERT
    // make sure eden top is properly aligned
    {
      Label L;
      btst(MinObjAlignmentInBytesMask, obj);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
    const Register free = end;
    sub(end, obj, free);                                   // compute amount of free space
    if (var_size_in_bytes->is_valid()) {
      // size is unknown at compile time
      cmp(free, var_size_in_bytes);
      brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
      delayed()->add(obj, var_size_in_bytes, end);
    } else {
      // size is known at compile time
      cmp(free, con_size_in_bytes);
      brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
      delayed()->add(obj, con_size_in_bytes, end);
    }
    // Compare obj with the value at top_addr; if still equal, swap the value of
    // end with the value at top_addr. If not equal, read the value at top_addr
    // into end.
    cas_ptr(top_addr, obj, end);
    // if someone beat us on the allocation, try again, otherwise continue
    cmp(obj, end);
    brx(Assembler::notEqual, false, Assembler::pn, retry);
    delayed()->mov(end, obj);                              // nop if successful since obj == end

#ifdef ASSERT
    // make sure eden top is properly aligned
    {
      Label L;
      const Register top_addr = t1;

      set((intx)ch->top_addr(), top_addr);
      ld_ptr(top_addr, 0, top_addr);
      btst(MinObjAlignmentInBytesMask, top_addr);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
  }
}


void MacroAssembler::tlab_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if   known at compile time
  Register t1,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
){
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1);
  assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  const Register free = t1;

  verify_tlab();

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);

  // calculate amount of free space
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
  sub(free, obj, free);

  Label done;
  if (var_size_in_bytes == noreg) {
    cmp(free, con_size_in_bytes);
  } else {
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}

void MacroAssembler::zero_memory(Register base, Register index) {
  assert_different_registers(base, index);
  Label loop;
  bind(loop);
  subcc(index, HeapWordSize, index);
  brx(Assembler::greaterEqual, true, Assembler::pt, loop);
  delayed()->st_ptr(G0, base, index);
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}
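// inc_counter builds the counter's 32-bit address in the usual SPARC
// two-piece fashion (illustrative assembly, not a quote of the output):
//   sethi %hi(counter_addr), Rtmp1          ! upper 22 bits
//   ld    [Rtmp1 + %lo(counter_addr)], Rtmp2
//   inc   Rtmp2
//   st    Rtmp2, [Rtmp1 + %lo(counter_addr)]
// Note that the read-increment-write is not atomic; these counters are
// statistics only and may drop updates under contention.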
3310 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 3311 ld(addr, Rtmp2); 3312 inc(Rtmp2); 3313 st(Rtmp2, addr); 3314 } 3315 3316 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3317 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3318 } 3319 3320 SkipIfEqual::SkipIfEqual( 3321 MacroAssembler* masm, Register temp, const bool* flag_addr, 3322 Assembler::Condition condition) { 3323 _masm = masm; 3324 AddressLiteral flag(flag_addr); 3325 _masm->sethi(flag, temp); 3326 _masm->ldub(temp, flag.low10(), temp); 3327 _masm->tst(temp); 3328 _masm->br(condition, false, Assembler::pt, _label); 3329 _masm->delayed()->nop(); 3330 } 3331 3332 SkipIfEqual::~SkipIfEqual() { 3333 _masm->bind(_label); 3334 } 3335 3336 3337 // Writes to stack successive pages until offset reached to check for 3338 // stack overflow + shadow pages. This clobbers tsp and scratch. 3339 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3340 Register Rscratch) { 3341 // Use stack pointer in temp stack pointer 3342 mov(SP, Rtsp); 3343 3344 // Bang stack for total size given plus stack shadow page size. 3345 // Bang one page at a time because a large size can overflow yellow and 3346 // red zones (the bang will fail but stack overflow handling can't tell that 3347 // it was a stack overflow bang vs a regular segv). 3348 int offset = os::vm_page_size(); 3349 Register Roffset = Rscratch; 3350 3351 Label loop; 3352 bind(loop); 3353 set((-offset)+STACK_BIAS, Rscratch); 3354 st(G0, Rtsp, Rscratch); 3355 set(offset, Roffset); 3356 sub(Rsize, Roffset, Rsize); 3357 cmp(Rsize, G0); 3358 br(Assembler::greater, false, Assembler::pn, loop); 3359 delayed()->sub(Rtsp, Roffset, Rtsp); 3360 3361 // Bang down shadow pages too. 3362 // At this point, (tmp-0) is the last address touched, so don't 3363 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3364 // was post-decremented.) Skip this address by starting at i=1, and 3365 // touch a few more pages below. N.B. It is important to touch all 3366 // the way down to and including i=StackShadowPages. 3367 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3368 set((-i*offset)+STACK_BIAS, Rscratch); 3369 st(G0, Rtsp, Rscratch); 3370 } 3371 } 3372 3373 void MacroAssembler::reserved_stack_check() { 3374 // testing if reserved zone needs to be enabled 3375 Label no_reserved_zone_enabling; 3376 3377 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3378 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3379 3380 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3381 3382 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3383 jump_to(stub, G4_scratch); 3384 delayed()->restore(); 3385 3386 should_not_reach_here(); 3387 3388 bind(no_reserved_zone_enabling); 3389 } 3390 3391 /////////////////////////////////////////////////////////////////////////////////// 3392 #if INCLUDE_ALL_GCS 3393 3394 static address satb_log_enqueue_with_frame = NULL; 3395 static u_char* satb_log_enqueue_with_frame_end = NULL; 3396 3397 static address satb_log_enqueue_frameless = NULL; 3398 static u_char* satb_log_enqueue_frameless_end = NULL; 3399 3400 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? 
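// What the stub generated below emits is, conceptually, the following fast
// path (a sketch for orientation only; "index" and "buf" abbreviate the
// SATBMarkQueue fields whose offsets are loaded below, and handle_zero stands
// for SATBMarkQueueSet::handle_zero_index_for_thread):
//
//   retry:
//     if (index == 0) {
//       handle_zero(thread);             // refill the buffer, then retry
//       goto retry;
//     }
//     index -= sizeof(intptr_t);         // the queue fills from high to low
//     *(oop*)(buf + index) = pre_val;    // [_buf + index] := pre_val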
3401 3402 static void generate_satb_log_enqueue(bool with_frame) { 3403 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 3404 CodeBuffer buf(bb); 3405 MacroAssembler masm(&buf); 3406 3407 #define __ masm. 3408 3409 address start = __ pc(); 3410 Register pre_val; 3411 3412 Label refill, restart; 3413 if (with_frame) { 3414 __ save_frame(0); 3415 pre_val = I0; // Was O0 before the save. 3416 } else { 3417 pre_val = O0; 3418 } 3419 3420 int satb_q_index_byte_offset = 3421 in_bytes(JavaThread::satb_mark_queue_offset() + 3422 SATBMarkQueue::byte_offset_of_index()); 3423 3424 int satb_q_buf_byte_offset = 3425 in_bytes(JavaThread::satb_mark_queue_offset() + 3426 SATBMarkQueue::byte_offset_of_buf()); 3427 3428 assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) && 3429 in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t), 3430 "check sizes in assembly below"); 3431 3432 __ bind(restart); 3433 3434 // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t 3435 // so ld_ptr is appropriate. 3436 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); 3437 3438 // index == 0? 3439 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); 3440 3441 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); 3442 __ sub(L0, oopSize, L0); 3443 3444 __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0 3445 if (!with_frame) { 3446 // Use return-from-leaf 3447 __ retl(); 3448 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3449 } else { 3450 // Not delayed. 3451 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset); 3452 } 3453 if (with_frame) { 3454 __ ret(); 3455 __ delayed()->restore(); 3456 } 3457 __ bind(refill); 3458 3459 address handle_zero = 3460 CAST_FROM_FN_PTR(address, 3461 &SATBMarkQueueSet::handle_zero_index_for_thread); 3462 // This should be rare enough that we can afford to save all the 3463 // scratch registers that the calling context might be using. 3464 __ mov(G1_scratch, L0); 3465 __ mov(G3_scratch, L1); 3466 __ mov(G4, L2); 3467 // We need the value of O0 above (for the write into the buffer), so we 3468 // save and restore it. 3469 __ mov(O0, L3); 3470 // Since the call will overwrite O7, we save and restore that, as well. 3471 __ mov(O7, L4); 3472 __ call_VM_leaf(L5, handle_zero, G2_thread); 3473 __ mov(L0, G1_scratch); 3474 __ mov(L1, G3_scratch); 3475 __ mov(L2, G4); 3476 __ mov(L3, O0); 3477 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); 3478 __ delayed()->mov(L4, O7); 3479 3480 if (with_frame) { 3481 satb_log_enqueue_with_frame = start; 3482 satb_log_enqueue_with_frame_end = __ pc(); 3483 } else { 3484 satb_log_enqueue_frameless = start; 3485 satb_log_enqueue_frameless_end = __ pc(); 3486 } 3487 3488 #undef __ 3489 } 3490 3491 void MacroAssembler::g1_write_barrier_pre(Register obj, 3492 Register index, 3493 int offset, 3494 Register pre_val, 3495 Register tmp, 3496 bool preserve_o_regs) { 3497 Label filtered; 3498 3499 if (obj == noreg) { 3500 // We are not loading the previous value so make 3501 // sure that we don't trash the value in pre_val 3502 // with the code below. 3503 assert_different_registers(pre_val, tmp); 3504 } else { 3505 // We will be loading the previous value 3506 // in this code so... 3507 assert(offset == 0 || index == noreg, "choose one"); 3508 assert(pre_val == noreg, "check this code"); 3509 } 3510 3511 // Is marking active? 
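// As a whole, the pre-barrier being emitted is, conceptually (a sketch; the
// flag tested just below is the per-thread SATB "active" byte/word):
//
//   if (satb_active) {
//     oop prev = (obj != noreg) ? *field : pre_val;
//     if (prev != NULL) enqueue(prev);   // via the stubs generated above
//   }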
3512 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 3513 ld(G2, 3514 in_bytes(JavaThread::satb_mark_queue_offset() + 3515 SATBMarkQueue::byte_offset_of_active()), 3516 tmp); 3517 } else { 3518 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, 3519 "Assumption"); 3520 ldsb(G2, 3521 in_bytes(JavaThread::satb_mark_queue_offset() + 3522 SATBMarkQueue::byte_offset_of_active()), 3523 tmp); 3524 } 3525 3526 // Is marking active? 3527 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); 3528 3529 // Do we need to load the previous value? 3530 if (obj != noreg) { 3531 // Load the previous value... 3532 if (index == noreg) { 3533 if (Assembler::is_simm13(offset)) { 3534 load_heap_oop(obj, offset, tmp); 3535 } else { 3536 set(offset, tmp); 3537 load_heap_oop(obj, tmp, tmp); 3538 } 3539 } else { 3540 load_heap_oop(obj, index, tmp); 3541 } 3542 // Previous value has been loaded into tmp 3543 pre_val = tmp; 3544 } 3545 3546 assert(pre_val != noreg, "must have a real register"); 3547 3548 // Is the previous value null? 3549 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered); 3550 3551 // OK, it's not filtered, so we'll need to call enqueue. In the normal 3552 // case, pre_val will be a scratch G-reg, but there are some cases in 3553 // which it's an O-reg. In the first case, do a normal call. In the 3554 // latter, do a save here and call the frameless version. 3555 3556 guarantee(pre_val->is_global() || pre_val->is_out(), 3557 "Or we need to think harder."); 3558 3559 if (pre_val->is_global() && !preserve_o_regs) { 3560 call(satb_log_enqueue_with_frame); 3561 delayed()->mov(pre_val, O0); 3562 } else { 3563 save_frame(0); 3564 call(satb_log_enqueue_frameless); 3565 delayed()->mov(pre_val->after_save(), O0); 3566 restore(); 3567 } 3568 3569 bind(filtered); 3570 } 3571 3572 static address dirty_card_log_enqueue = 0; 3573 static u_char* dirty_card_log_enqueue_end = 0; 3574 3575 // This gets to assume that o0 contains the object address. 3576 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 3577 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 3578 CodeBuffer buf(bb); 3579 MacroAssembler masm(&buf); 3580 #define __ masm. 3581 address start = __ pc(); 3582 3583 Label not_already_dirty, restart, refill, young_card; 3584 3585 __ srlx(O0, CardTable::card_shift, O0); 3586 AddressLiteral addrlit(byte_map_base); 3587 __ set(addrlit, O1); // O1 := <card table base> 3588 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3589 3590 __ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 3591 3592 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3593 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 3594 3595 assert(CardTable::dirty_card_val() == 0, "otherwise check this code"); 3596 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 3597 3598 __ bind(young_card); 3599 // We didn't take the branch, so we're already dirty: return. 3600 // Use return-from-leaf 3601 __ retl(); 3602 __ delayed()->nop(); 3603 3604 // Not dirty. 3605 __ bind(not_already_dirty); 3606 3607 // Get O0 + O1 into a reg by itself 3608 __ add(O0, O1, O3); 3609 3610 // First, dirty it. 3611 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty). 
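// Together with the enqueue code below, this implements the tail of the
// conceptual G1 post-barrier (a sketch; the names abbreviate the values
// computed above):
//
//   jbyte* card = byte_map_base + (store_addr >> card_shift);
//   if (*card == g1_young_card_val()) return;  // young regions need no barrier
//   StoreLoad_membar();
//   if (*card == dirty_card_val()) return;     // already dirtied by someone else
//   *card = dirty_card_val();                  // the stb above
//   enqueue(card);                             // the dirty card queue code below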
3612
3613   int dirty_card_q_index_byte_offset =
3614     in_bytes(JavaThread::dirty_card_queue_offset() +
3615              DirtyCardQueue::byte_offset_of_index());
3616   int dirty_card_q_buf_byte_offset =
3617     in_bytes(JavaThread::dirty_card_queue_offset() +
3618              DirtyCardQueue::byte_offset_of_buf());
3619   __ bind(restart);
3620
3621   // Load the index into the update buffer. DirtyCardQueue::_index is
3622   // a size_t so ld_ptr is appropriate here.
3623   __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
3624
3625   // index == 0?
3626   __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
3627
3628   __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
3629   __ sub(L0, oopSize, L0);
3630
3631   __ st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
3632   // Use return-from-leaf
3633   __ retl();
3634   __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
3635
3636   __ bind(refill);
3637   address handle_zero =
3638     CAST_FROM_FN_PTR(address,
3639                      &DirtyCardQueueSet::handle_zero_index_for_thread);
3640   // This should be rare enough that we can afford to save all the
3641   // scratch registers that the calling context might be using.
3642   __ mov(G1_scratch, L3);
3643   __ mov(G3_scratch, L5);
3644   // We need the value of O3 above (for the write into the buffer), so we
3645   // save and restore it.
3646   __ mov(O3, L6);
3647   // Since the call will overwrite O7, we save and restore that, as well.
3648   __ mov(O7, L4);
3649
3650   __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
3651   __ mov(L3, G1_scratch);
3652   __ mov(L5, G3_scratch);
3653   __ mov(L6, O3);
3654   __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
3655   __ delayed()->mov(L4, O7);
3656
3657   dirty_card_log_enqueue = start;
3658   dirty_card_log_enqueue_end = __ pc();
3659   // XXX Should have a guarantee here about not going off the end!
3660   // Does it already do so? Do an experiment...
3661
3662 #undef __
3663
3664 }
3665
3666 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
3667
3668   Label filtered;
3669   MacroAssembler* post_filter_masm = this;
3670
3671   if (new_val == G0) return;
3672
3673   G1BarrierSet* bs =
3674     barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
3675   CardTable* ct = bs->card_table();
3676
3677   if (G1RSBarrierRegionFilter) {
3678     xor3(store_addr, new_val, tmp);
3679     srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
3680
3681     // XXX Should I predict this taken or not? Does it matter?
3682     cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
3683   }
3684
3685   // If the "store_addr" register is an "in" or "local" register, move it to
3686   // a scratch reg so we can pass it as an argument.
3687   bool use_scr = !(store_addr->is_global() || store_addr->is_out());
3688   // Pick a scratch register different from "tmp".
3689   Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
3690   // Make sure we use up the delay slot!
3691 if (use_scr) { 3692 post_filter_masm->mov(store_addr, scr); 3693 } else { 3694 post_filter_masm->nop(); 3695 } 3696 save_frame(0); 3697 call(dirty_card_log_enqueue); 3698 if (use_scr) { 3699 delayed()->mov(scr, O0); 3700 } else { 3701 delayed()->mov(store_addr->after_save(), O0); 3702 } 3703 restore(); 3704 3705 bind(filtered); 3706 } 3707 3708 // Called from init_globals() after universe_init() and before interpreter_init() 3709 void g1_barrier_stubs_init() { 3710 CollectedHeap* heap = Universe::heap(); 3711 if (heap->kind() == CollectedHeap::G1) { 3712 // Only needed for G1 3713 if (dirty_card_log_enqueue == 0) { 3714 G1BarrierSet* bs = 3715 barrier_set_cast<G1BarrierSet>(heap->barrier_set()); 3716 CardTable *ct = bs->card_table(); 3717 generate_dirty_card_log_enqueue(ct->byte_map_base()); 3718 assert(dirty_card_log_enqueue != 0, "postcondition."); 3719 } 3720 if (satb_log_enqueue_with_frame == 0) { 3721 generate_satb_log_enqueue(true); 3722 assert(satb_log_enqueue_with_frame != 0, "postcondition."); 3723 } 3724 if (satb_log_enqueue_frameless == 0) { 3725 generate_satb_log_enqueue(false); 3726 assert(satb_log_enqueue_frameless != 0, "postcondition."); 3727 } 3728 } 3729 } 3730 3731 #endif // INCLUDE_ALL_GCS 3732 /////////////////////////////////////////////////////////////////////////////////// 3733 3734 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { 3735 // If we're writing constant NULL, we can skip the write barrier. 3736 if (new_val == G0) return; 3737 CardTableBarrierSet* bs = 3738 barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); 3739 CardTable* ct = bs->card_table(); 3740 3741 assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); 3742 card_table_write(ct->byte_map_base(), tmp, store_addr); 3743 } 3744 3745 // ((OopHandle)result).resolve(); 3746 void MacroAssembler::resolve_oop_handle(Register result) { 3747 // OopHandle::resolve is an indirection. 3748 ld_ptr(result, 0, result); 3749 } 3750 3751 void MacroAssembler::load_mirror(Register mirror, Register method) { 3752 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3753 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3754 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3755 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3756 ld_ptr(mirror, mirror_offset, mirror); 3757 resolve_oop_handle(mirror); 3758 } 3759 3760 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3761 // The number of bytes in this code is used by 3762 // MachCallDynamicJavaNode::ret_addr_offset() 3763 // if this changes, change that. 
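// With compressed class pointers, the load-and-decode below is, in
// pseudo-C++ (a sketch; base/shift stand for Universe::narrow_klass_base()
// and Universe::narrow_klass_shift()):
//
//   narrowKlass nk = *(narrowKlass*)((address)src_oop + klass_offset);  // 32-bit lduw
//   klass = (Klass*)(base + ((uintptr_t)nk << shift));                  // decode_klass_not_null
//
// whereas the uncompressed case is a single 64-bit load of the Klass*.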
if (UseCompressedClassPointers) {
3765     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3766     decode_klass_not_null(klass);
3767   } else {
3768     ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3769   }
3770 }
3771
3772 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
3773   if (UseCompressedClassPointers) {
3774     assert(dst_oop != klass, "not enough registers");
3775     encode_klass_not_null(klass);
3776     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3777   } else {
3778     st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3779   }
3780 }
3781
3782 void MacroAssembler::store_klass_gap(Register s, Register d) {
3783   if (UseCompressedClassPointers) {
3784     assert(s != d, "not enough registers");
3785     st(s, d, oopDesc::klass_gap_offset_in_bytes());
3786   }
3787 }
3788
3789 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
3790   if (UseCompressedOops) {
3791     lduw(s, d);
3792     decode_heap_oop(d);
3793   } else {
3794     ld_ptr(s, d);
3795   }
3796 }
3797
3798 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
3799   if (UseCompressedOops) {
3800     lduw(s1, s2, d);
3801     decode_heap_oop(d, d);
3802   } else {
3803     ld_ptr(s1, s2, d);
3804   }
3805 }
3806
3807 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
3808   if (UseCompressedOops) {
3809     lduw(s1, simm13a, d);
3810     decode_heap_oop(d, d);
3811   } else {
3812     ld_ptr(s1, simm13a, d);
3813   }
3814 }
3815
3816 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
3817   if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
3818   else load_heap_oop(s1, s2.as_register(), d);
3819 }
3820
3821 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
3822   if (UseCompressedOops) {
3823     assert(s1 != d && s2 != d, "not enough registers");
3824     encode_heap_oop(d);
3825     st(d, s1, s2);
3826   } else {
3827     st_ptr(d, s1, s2);
3828   }
3829 }
3830
3831 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
3832   if (UseCompressedOops) {
3833     assert(s1 != d, "not enough registers");
3834     encode_heap_oop(d);
3835     st(d, s1, simm13a);
3836   } else {
3837     st_ptr(d, s1, simm13a);
3838   }
3839 }
3840
3841 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
3842   if (UseCompressedOops) {
3843     assert(a.base() != d, "not enough registers");
3844     encode_heap_oop(d);
3845     st(d, a, offset);
3846   } else {
3847     st_ptr(d, a, offset);
3848   }
3849 }
3850
3851
3852 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
3853   assert (UseCompressedOops, "must be compressed");
3854   assert (Universe::heap() != NULL, "java heap should be initialized");
3855   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3856   verify_oop(src);
3857   if (Universe::narrow_oop_base() == NULL) {
3858     srlx(src, LogMinObjAlignmentInBytes, dst);
3859     return;
3860   }
3861   Label done;
3862   if (src == dst) {
3863     // optimize for frequent case src == dst
3864     bpr(rc_nz, true, Assembler::pt, src, done);
3865     delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
3866     bind(done);
3867     srlx(src, LogMinObjAlignmentInBytes, dst);
3868   } else {
3869     bpr(rc_z, false, Assembler::pn, src, done);
3870     delayed()->mov(G0, dst);
3871     // the sub could be moved before the branch with its delay slot annulled,
3872     // but that may add unneeded work when the oop is null
3873     sub(src, G6_heapbase, dst);
3874     srlx(dst, LogMinObjAlignmentInBytes, dst);
3875     bind(done);
3876   }
3877 }
3878
3879
3880 void
MacroAssembler::encode_heap_oop_not_null(Register r) {
3881   assert (UseCompressedOops, "must be compressed");
3882   assert (Universe::heap() != NULL, "java heap should be initialized");
3883   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3884   verify_oop(r);
3885   if (Universe::narrow_oop_base() != NULL)
3886     sub(r, G6_heapbase, r);
3887   srlx(r, LogMinObjAlignmentInBytes, r);
3888 }
3889
3890 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
3891   assert (UseCompressedOops, "must be compressed");
3892   assert (Universe::heap() != NULL, "java heap should be initialized");
3893   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3894   verify_oop(src);
3895   if (Universe::narrow_oop_base() == NULL) {
3896     srlx(src, LogMinObjAlignmentInBytes, dst);
3897   } else {
3898     sub(src, G6_heapbase, dst);
3899     srlx(dst, LogMinObjAlignmentInBytes, dst);
3900   }
3901 }
3902
3903 // Same algorithm as oops.inline.hpp decode_heap_oop.
3904 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
3905   assert (UseCompressedOops, "must be compressed");
3906   assert (Universe::heap() != NULL, "java heap should be initialized");
3907   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3908   sllx(src, LogMinObjAlignmentInBytes, dst);
3909   if (Universe::narrow_oop_base() != NULL) {
3910     Label done;
3911     bpr(rc_nz, true, Assembler::pt, dst, done);
3912     delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
3913     bind(done);
3914   }
3915   verify_oop(dst);
3916 }
3917
3918 void MacroAssembler::decode_heap_oop_not_null(Register r) {
3919   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3920   // pd_code_size_limit.
3921   // Also do not verify_oop as this is called by verify_oop.
3922   assert (UseCompressedOops, "must be compressed");
3923   assert (Universe::heap() != NULL, "java heap should be initialized");
3924   assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
3925   sllx(r, LogMinObjAlignmentInBytes, r);
3926   if (Universe::narrow_oop_base() != NULL)
3927     add(r, G6_heapbase, r);
3928 }
3929
3930 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
3931   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
3932   // pd_code_size_limit.
3933   // Also do not verify_oop as this is called by verify_oop.
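// Conceptually (a sketch): oop = base + ((uintptr_t)narrow << shift), where
// base/shift stand for Universe::narrow_oop_base()/narrow_oop_shift(). With a
// null base this collapses to the single sllx below, and since the value is
// known to be non-null, no null check is needed before adding the base.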
3934 assert (UseCompressedOops, "must be compressed"); 3935 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 3936 sllx(src, LogMinObjAlignmentInBytes, dst); 3937 if (Universe::narrow_oop_base() != NULL) 3938 add(dst, G6_heapbase, dst); 3939 } 3940 3941 void MacroAssembler::encode_klass_not_null(Register r) { 3942 assert (UseCompressedClassPointers, "must be compressed"); 3943 if (Universe::narrow_klass_base() != NULL) { 3944 assert(r != G6_heapbase, "bad register choice"); 3945 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 3946 sub(r, G6_heapbase, r); 3947 if (Universe::narrow_klass_shift() != 0) { 3948 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 3949 srlx(r, LogKlassAlignmentInBytes, r); 3950 } 3951 reinit_heapbase(); 3952 } else { 3953 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3954 srlx(r, Universe::narrow_klass_shift(), r); 3955 } 3956 } 3957 3958 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 3959 if (src == dst) { 3960 encode_klass_not_null(src); 3961 } else { 3962 assert (UseCompressedClassPointers, "must be compressed"); 3963 if (Universe::narrow_klass_base() != NULL) { 3964 set((intptr_t)Universe::narrow_klass_base(), dst); 3965 sub(src, dst, dst); 3966 if (Universe::narrow_klass_shift() != 0) { 3967 srlx(dst, LogKlassAlignmentInBytes, dst); 3968 } 3969 } else { 3970 // shift src into dst 3971 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 3972 srlx(src, Universe::narrow_klass_shift(), dst); 3973 } 3974 } 3975 } 3976 3977 // Function instr_size_for_decode_klass_not_null() counts the instructions 3978 // generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 3979 // the instructions they generate change, then this method needs to be updated. 3980 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3981 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 3982 int num_instrs = 1; // shift src,dst or add 3983 if (Universe::narrow_klass_base() != NULL) { 3984 // set + add + set 3985 num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 3986 insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 3987 if (Universe::narrow_klass_shift() != 0) { 3988 num_instrs += 1; // sllx 3989 } 3990 } 3991 return num_instrs * BytesPerInstWord; 3992 } 3993 3994 // !!! If the instructions that get generated here change then function 3995 // instr_size_for_decode_klass_not_null() needs to get updated. 3996 void MacroAssembler::decode_klass_not_null(Register r) { 3997 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 3998 // pd_code_size_limit. 
3999 assert (UseCompressedClassPointers, "must be compressed"); 4000 if (Universe::narrow_klass_base() != NULL) { 4001 assert(r != G6_heapbase, "bad register choice"); 4002 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4003 if (Universe::narrow_klass_shift() != 0) 4004 sllx(r, LogKlassAlignmentInBytes, r); 4005 add(r, G6_heapbase, r); 4006 reinit_heapbase(); 4007 } else { 4008 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4009 sllx(r, Universe::narrow_klass_shift(), r); 4010 } 4011 } 4012 4013 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 4014 if (src == dst) { 4015 decode_klass_not_null(src); 4016 } else { 4017 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 4018 // pd_code_size_limit. 4019 assert (UseCompressedClassPointers, "must be compressed"); 4020 if (Universe::narrow_klass_base() != NULL) { 4021 if (Universe::narrow_klass_shift() != 0) { 4022 assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 4023 set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 4024 sllx(src, LogKlassAlignmentInBytes, dst); 4025 add(dst, G6_heapbase, dst); 4026 reinit_heapbase(); 4027 } else { 4028 set((intptr_t)Universe::narrow_klass_base(), dst); 4029 add(src, dst, dst); 4030 } 4031 } else { 4032 // shift/mov src into dst. 4033 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); 4034 sllx(src, Universe::narrow_klass_shift(), dst); 4035 } 4036 } 4037 } 4038 4039 void MacroAssembler::reinit_heapbase() { 4040 if (UseCompressedOops || UseCompressedClassPointers) { 4041 if (Universe::heap() != NULL) { 4042 set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 4043 } else { 4044 AddressLiteral base(Universe::narrow_ptrs_base_addr()); 4045 load_ptr_contents(base, G6_heapbase); 4046 } 4047 } 4048 } 4049 4050 #ifdef COMPILER2 4051 4052 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. 
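// With 'result' playing the role of the return value, the fast path below is
// roughly equivalent to the following scalar loop (a sketch; the VIS code
// handles 8 chars = 16 bytes per iteration via bmask/bshuffle):
//
//   for (int i = 0; i < cnt; i++) {
//     jchar c = src[i];
//     if (c > 0xff) { result = 0; goto Ldone; }  // non-latin1: bail out
//     dst[i] = (jbyte)c;
//   }
//   result = cnt;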
4053 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, 4054 Register tmp1, Register tmp2, Register tmp3, Register tmp4, 4055 FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { 4056 Label Lloop, Lslow; 4057 assert(UseVIS >= 3, "VIS3 is required"); 4058 assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); 4059 assert_different_registers(ftmp1, ftmp2, ftmp3); 4060 4061 // Check if cnt >= 8 (= 16 bytes) 4062 cmp(cnt, 8); 4063 br(Assembler::less, false, Assembler::pn, Lslow); 4064 delayed()->mov(cnt, result); // copy count 4065 4066 // Check for 8-byte alignment of src and dst 4067 or3(src, dst, tmp1); 4068 andcc(tmp1, 7, G0); 4069 br(Assembler::notZero, false, Assembler::pn, Lslow); 4070 delayed()->nop(); 4071 4072 // Set mask for bshuffle instruction 4073 Register mask = tmp4; 4074 set(0x13579bdf, mask); 4075 bmask(mask, G0, G0); 4076 4077 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters 4078 Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 4079 add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 4080 sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 4081 or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 4082 4083 // Load first 8 bytes 4084 ldx(src, 0, tmp1); 4085 4086 bind(Lloop); 4087 // Load next 8 bytes 4088 ldx(src, 8, tmp2); 4089 4090 // Check for non-latin1 character by testing if the most significant byte of a char is set. 4091 // Although we have to move the data between integer and floating point registers, this is 4092 // still faster than the corresponding VIS instructions (ford/fand/fcmpd). 4093 or3(tmp1, tmp2, tmp3); 4094 btst(tmp3, mask); 4095 // annul zeroing if branch is not taken to preserve original count 4096 brx(Assembler::notZero, true, Assembler::pn, Ldone); 4097 delayed()->mov(G0, result); // 0 - failed 4098 4099 // Move bytes into float register 4100 movxtod(tmp1, ftmp1); 4101 movxtod(tmp2, ftmp2); 4102 4103 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 4104 bshuffle(ftmp1, ftmp2, ftmp3); 4105 stf(FloatRegisterImpl::D, ftmp3, dst, 0); 4106 4107 // Increment addresses and decrement count 4108 inc(src, 16); 4109 inc(dst, 8); 4110 dec(cnt, 8); 4111 4112 cmp(cnt, 8); 4113 // annul LDX if branch is not taken to prevent access past end of string 4114 br(Assembler::greaterEqual, true, Assembler::pt, Lloop); 4115 delayed()->ldx(src, 0, tmp1); 4116 4117 // Fallback to slow version 4118 bind(Lslow); 4119 } 4120 4121 // Compress char[] to byte[]. Return 0 on failure. 4122 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { 4123 Label Lloop; 4124 assert_different_registers(src, dst, cnt, tmp, result); 4125 4126 lduh(src, 0, tmp); 4127 4128 bind(Lloop); 4129 inc(src, sizeof(jchar)); 4130 cmp(tmp, 0xff); 4131 // annul zeroing if branch is not taken to preserve original count 4132 br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc 4133 delayed()->mov(G0, result); // 0 - failed 4134 deccc(cnt); 4135 stb(tmp, dst, 0); 4136 inc(dst); 4137 // annul LDUH if branch is not taken to prevent access past end of string 4138 br(Assembler::notZero, true, Assembler::pt, Lloop); 4139 delayed()->lduh(src, 0, tmp); // hoisted 4140 } 4141 4142 // Inflate byte[] to char[] by inflating 16 bytes at once. 
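// The equivalent scalar loop is (a sketch; fpmerge interleaves each source
// byte with a zero byte, so each iteration of the VIS loop inflates 8 bytes
// into 16):
//
//   for (int i = 0; i < cnt; i++) {
//     dst[i] = (jchar)(src[i] & 0xff);
//   }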
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
4144                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
4145   Label Lloop, Lslow;
4146   assert(UseVIS >= 3, "VIS3 is required");
4147   assert_different_registers(src, dst, cnt, tmp);
4148   assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);
4149
4150   // Check if cnt >= 8 (= 16 bytes)
4151   cmp(cnt, 8);
4152   br(Assembler::less, false, Assembler::pn, Lslow);
4153   delayed()->nop();
4154
4155   // Check for 8-byte alignment of src and dst
4156   or3(src, dst, tmp);
4157   andcc(tmp, 7, G0);
4158   br(Assembler::notZero, false, Assembler::pn, Lslow);
4159   // Initialize float register to zero
4160   FloatRegister zerof = ftmp4;
4161   delayed()->fzero(FloatRegisterImpl::D, zerof);
4162
4163   // Load first 8 bytes
4164   ldf(FloatRegisterImpl::D, src, 0, ftmp1);
4165
4166   bind(Lloop);
4167   inc(src, 8);
4168   dec(cnt, 8);
4169
4170   // Inflate the string by interleaving each byte from the source array
4171   // with a zero byte and storing the result in the destination array.
4172   fpmerge(zerof, ftmp1->successor(), ftmp2);
4173   stf(FloatRegisterImpl::D, ftmp2, dst, 8);
4174   fpmerge(zerof, ftmp1, ftmp3);
4175   stf(FloatRegisterImpl::D, ftmp3, dst, 0);
4176
4177   inc(dst, 16);
4178
4179   cmp(cnt, 8);
4180   // annul LDF if branch is not taken to prevent access past end of string
4181   br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
4182   delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);
4183
4184   // Fallback to slow version
4185   bind(Lslow);
4186 }
4187
4188 // Inflate byte[] to char[].
4189 void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
4190   Label Loop;
4191   assert_different_registers(src, dst, cnt, tmp);
4192
4193   ldub(src, 0, tmp);
4194   bind(Loop);
4195   inc(src);
4196   deccc(cnt);
4197   sth(tmp, dst, 0);
4198   inc(dst, sizeof(jchar));
4199   // annul LDUB if branch is not taken to prevent access past end of string
4200   br(Assembler::notZero, true, Assembler::pt, Loop);
4201   delayed()->ldub(src, 0, tmp); // hoisted
4202 }
4203
4204 void MacroAssembler::string_compare(Register str1, Register str2,
4205                                     Register cnt1, Register cnt2,
4206                                     Register tmp1, Register tmp2,
4207                                     Register result, int ae) {
4208   Label Ldone, Lloop;
4209   assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
4210   int stride1, stride2;
4211
4212   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
4213   // we interchange str1 and str2 in the UL case and negate the result.
4214   // This way, str1 is always Latin1-encoded, except for the UU case.
4215
4216   if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
4217     srl(cnt2, 1, cnt2);
4218   }
4219
4220   // See if the lengths are different, and calculate min in cnt1.
4221   // Save diff in case we need it for a tie-breaker.
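// For reference, the contract implemented here is that of String.compareTo
// (a sketch; 'diff' computed just below is the final tie-breaker):
//
//   for (int i = 0; i < min(len1, len2); i++) {
//     if (str1[i] != str2[i]) return str1[i] - str2[i];
//   }
//   return len1 - len2;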
4222 Label Lskip; 4223 Register diff = tmp1; 4224 subcc(cnt1, cnt2, diff); 4225 br(Assembler::greater, true, Assembler::pt, Lskip); 4226 // cnt2 is shorter, so use its count: 4227 delayed()->mov(cnt2, cnt1); 4228 bind(Lskip); 4229 4230 // Rename registers 4231 Register limit1 = cnt1; 4232 Register limit2 = limit1; 4233 Register chr1 = result; 4234 Register chr2 = cnt2; 4235 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4236 // We need an additional register to keep track of two limits 4237 assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); 4238 limit2 = tmp2; 4239 } 4240 4241 // Is the minimum length zero? 4242 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity 4243 br(Assembler::equal, true, Assembler::pn, Ldone); 4244 // result is difference in lengths 4245 if (ae == StrIntrinsicNode::UU) { 4246 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4247 } else { 4248 delayed()->mov(diff, result); 4249 } 4250 4251 // Load first characters 4252 if (ae == StrIntrinsicNode::LL) { 4253 stride1 = stride2 = sizeof(jbyte); 4254 ldub(str1, 0, chr1); 4255 ldub(str2, 0, chr2); 4256 } else if (ae == StrIntrinsicNode::UU) { 4257 stride1 = stride2 = sizeof(jchar); 4258 lduh(str1, 0, chr1); 4259 lduh(str2, 0, chr2); 4260 } else { 4261 stride1 = sizeof(jbyte); 4262 stride2 = sizeof(jchar); 4263 ldub(str1, 0, chr1); 4264 lduh(str2, 0, chr2); 4265 } 4266 4267 // Compare first characters 4268 subcc(chr1, chr2, chr1); 4269 br(Assembler::notZero, false, Assembler::pt, Ldone); 4270 assert(chr1 == result, "result must be pre-placed"); 4271 delayed()->nop(); 4272 4273 // Check if the strings start at same location 4274 cmp(str1, str2); 4275 brx(Assembler::equal, true, Assembler::pn, Ldone); 4276 delayed()->mov(G0, result); // result is zero 4277 4278 // We have no guarantee that on 64 bit the higher half of limit is 0 4279 signx(limit1); 4280 4281 // Get limit 4282 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4283 sll(limit1, 1, limit2); 4284 subcc(limit2, stride2, chr2); 4285 } 4286 subcc(limit1, stride1, chr1); 4287 br(Assembler::zero, true, Assembler::pn, Ldone); 4288 // result is difference in lengths 4289 if (ae == StrIntrinsicNode::UU) { 4290 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars 4291 } else { 4292 delayed()->mov(diff, result); 4293 } 4294 4295 // Shift str1 and str2 to the end of the arrays, negate limit 4296 add(str1, limit1, str1); 4297 add(str2, limit2, str2); 4298 neg(chr1, limit1); // limit1 = -(limit1-stride1) 4299 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4300 neg(chr2, limit2); // limit2 = -(limit2-stride2) 4301 } 4302 4303 // Compare the rest of the characters 4304 load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); 4305 4306 bind(Lloop); 4307 load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); 4308 4309 subcc(chr1, chr2, chr1); 4310 br(Assembler::notZero, false, Assembler::pt, Ldone); 4311 assert(chr1 == result, "result must be pre-placed"); 4312 delayed()->inccc(limit1, stride1); 4313 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 4314 inccc(limit2, stride2); 4315 } 4316 4317 // annul LDUB if branch is not taken to prevent access past end of string 4318 br(Assembler::notZero, true, Assembler::pt, Lloop); 4319 delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 
2 : 1, false); 4320 4321 // If strings are equal up to min length, return the length difference. 4322 if (ae == StrIntrinsicNode::UU) { 4323 // Divide by 2 to get number of chars 4324 sra(diff, 1, result); 4325 } else { 4326 mov(diff, result); 4327 } 4328 4329 // Otherwise, return the difference between the first mismatched chars. 4330 bind(Ldone); 4331 if(ae == StrIntrinsicNode::UL) { 4332 // Negate result (see note above) 4333 neg(result); 4334 } 4335 } 4336 4337 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 4338 Register limit, Register tmp, Register result, bool is_byte) { 4339 Label Ldone, Lloop, Lremaining; 4340 assert_different_registers(ary1, ary2, limit, tmp, result); 4341 4342 int length_offset = arrayOopDesc::length_offset_in_bytes(); 4343 int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 4344 assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); 4345 4346 if (is_array_equ) { 4347 // return true if the same array 4348 cmp(ary1, ary2); 4349 brx(Assembler::equal, true, Assembler::pn, Ldone); 4350 delayed()->mov(1, result); // equal 4351 4352 br_null(ary1, true, Assembler::pn, Ldone); 4353 delayed()->clr(result); // not equal 4354 4355 br_null(ary2, true, Assembler::pn, Ldone); 4356 delayed()->clr(result); // not equal 4357 4358 // load the lengths of arrays 4359 ld(Address(ary1, length_offset), limit); 4360 ld(Address(ary2, length_offset), tmp); 4361 4362 // return false if the two arrays are not equal length 4363 cmp(limit, tmp); 4364 br(Assembler::notEqual, true, Assembler::pn, Ldone); 4365 delayed()->clr(result); // not equal 4366 } 4367 4368 cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); 4369 delayed()->mov(1, result); // zero-length arrays are equal 4370 4371 if (is_array_equ) { 4372 // load array addresses 4373 add(ary1, base_offset, ary1); 4374 add(ary2, base_offset, ary2); 4375 // set byte count 4376 if (!is_byte) { 4377 sll(limit, exact_log2(sizeof(jchar)), limit); 4378 } 4379 } else { 4380 // We have no guarantee that on 64 bit the higher half of limit is 0 4381 signx(limit); 4382 } 4383 4384 #ifdef ASSERT 4385 // Sanity check for doubleword (8-byte) alignment of ary1 and ary2. 4386 // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). 4387 Label Laligned; 4388 or3(ary1, ary2, tmp); 4389 andcc(tmp, 7, tmp); 4390 br_null_short(tmp, Assembler::pn, Laligned); 4391 STOP("First array element is not 8-byte aligned."); 4392 should_not_reach_here(); 4393 bind(Laligned); 4394 #endif 4395 4396 // Shift ary1 and ary2 to the end of the arrays, negate limit 4397 add(ary1, limit, ary1); 4398 add(ary2, limit, ary2); 4399 neg(limit, limit); 4400 4401 // MAIN LOOP 4402 // Load and compare array elements of size 'byte_width' until the elements are not 4403 // equal or we reached the end of the arrays. If the size of the arrays is not a 4404 // multiple of 'byte_width', we simply read over the end of the array, bail out and 4405 // compare the remaining bytes below by skipping the garbage bytes. 
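// In pseudo-C++ the main loop and tail are (a sketch; 'limit' is the negated
// byte count, so both arrays are indexed backwards from their ends):
//
//   uint64_t a = *(uint64_t*)(ary1 + limit);
//   for (;;) {
//     uint64_t b = *(uint64_t*)(ary2 + limit);
//     limit += 8;
//     if (limit >= 0) {                        // read (possibly) past the end
//       int shift = limit * 8;                 // bits of over-read garbage
//       return (a >> shift) == (b >> shift);   // big-endian: garbage sits in the low bits
//     }
//     if (a != b) return false;
//     a = *(uint64_t*)(ary1 + limit);
//   }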
ldx(ary1, limit, result);
4407   bind(Lloop);
4408   ldx(ary2, limit, tmp);
4409   inccc(limit, 8);
4410   // Bail out if we reached the end (but still do the comparison)
4411   br(Assembler::positive, false, Assembler::pn, Lremaining);
4412   delayed()->cmp(result, tmp);
4413   // Check equality of elements
4414   brx(Assembler::equal, false, Assembler::pt, target(Lloop));
4415   delayed()->ldx(ary1, limit, result);
4416
4417   ba(Ldone);
4418   delayed()->clr(result); // not equal
4419
4420   // TAIL COMPARISON
4421   // We got here because we reached the end of the arrays. 'limit' is the number of
4422   // garbage bytes we may have compared by reading over the end of the arrays. Shift
4423   // out the garbage and compare the remaining elements.
4424   bind(Lremaining);
4425   // Optimistic shortcut: elements potentially including garbage are equal
4426   brx(Assembler::equal, true, Assembler::pt, target(Ldone));
4427   delayed()->mov(1, result); // equal
4428   // Shift 'limit' bytes to the right and compare
4429   sll(limit, 3, limit); // bytes to bits
4430   srlx(result, limit, result);
4431   srlx(tmp, limit, tmp);
4432   cmp(result, tmp);
4433   clr(result);
4434   movcc(Assembler::equal, false, xcc, 1, result);
4435
4436   bind(Ldone);
4437 }
4438
4439 void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {
4440
4441   // test for negative bytes in input string of a given size
4442   // result is 1 if found, 0 otherwise.
4443
4444   Label Lcore, Ltail, Lreturn, Lcore_rpt;
4445
4446   assert_different_registers(inp, size, t2, t3, t4, t5, result);
4447
4448   Register i = result; // result used as integer index i until very end
4449   Register lmask = t2; // t2 is aliased to lmask
4450
4451   // INITIALIZATION
4452   // ===========================================================
4453   // initialize highbits mask -> lmask = 0x8080808080808080 (8B/64b)
4454   // compute unaligned offset -> i
4455   // compute core end index -> t5
4456   Assembler::sethi(0x80808000, t2); // use raw sethi: the sethi macro fails to emit the optimal sequence here
4457   add(t2, 0x80, t2);
4458   sllx(t2, 32, t3);
4459   or3(t3, t2, lmask); // 0x8080808080808080 -> lmask
4460   sra(size, 0, size);
4461   andcc(inp, 0x7, i); // unaligned offset -> i
4462   br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
4463   delayed()->add(size, -8, t5); // (annulled) core end index -> t5
4464
4465   // ===========================================================
4466
4467   // UNALIGNED HEAD
4468   // ===========================================================
4469   // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
4470   // * obliterate (ignore) bytes outside string by shifting off reg ends
4471   // * compare with bitmask, short circuit return true if one or more high
4472   //   bits set.
4473   cmp(size, 0);
4474   br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
4475   delayed()->mov(0, result); // annulled so i not clobbered for following
4476   neg(i, t4);
4477   add(i, size, t5);
4478   ldx(inp, t4, t3); // raw aligned 8B containing unaligned head -> t3
4479   mov(8, t4);
4480   sub(t4, t5, t4);
4481   sra(t4, 31, t5);
4482   andn(t4, t5, t5);
4483   add(i, t5, t4);
4484   sll(t5, 3, t5);
4485   sll(t4, 3, t4); // # bits to shift right, left -> t5,t4
4486   srlx(t3, t5, t3);
4487   sllx(t3, t4, t3); // bytes outside string in 8B header obliterated -> t3
4488   andcc(lmask, t3, G0);
4489   brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
delayed()->mov(1, result); // annulled so i not clobbered for following
4491   add(size, -8, t5); // core end index -> t5
4492   mov(8, t4);
4493   sub(t4, i, i); // # bytes examined in unaligned head (<8) -> i
4494   // ===========================================================
4495
4496   // ALIGNED CORE
4497   // ===========================================================
4498   // * iterate index i over aligned 8B sections of core, comparing with
4499   //   bitmask, short circuit return true if one or more high bits set
4500   // t5 contains core end index/loop limit which is the index
4501   //   of the MSB of last (unaligned) 8B fully contained in the string.
4502   // inp contains address of first byte in string/array
4503   // lmask contains 8B high bit mask for comparison
4504   // i contains next index to be processed (adr. inp+i is on 8B boundary)
4505   bind(Lcore);
4506   cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
4507   bind(Lcore_rpt);
4508   ldx(inp, i, t3);
4509   andcc(t3, lmask, G0);
4510   brx(Assembler::notZero, true, Assembler::pn, Lreturn);
4511   delayed()->mov(1, result); // annulled so i not clobbered for following
4512   add(i, 8, i);
4513   cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
4514   // ===========================================================
4515
4516   // ALIGNED TAIL (<8B)
4517   // ===========================================================
4518   // handle aligned tail of 7B or less as complete 8B, obliterating end of
4519   // string bytes by shifting them off end, compare what's left with bitmask
4520   // inp contains address of first byte in string/array
4521   // lmask contains 8B high bit mask for comparison
4522   // i contains next index to be processed (adr. inp+i is on 8B boundary)
4523   bind(Ltail);
4524   subcc(size, i, t4); // # of remaining bytes in string -> t4
4525   // return 0 if no more remaining bytes
4526   br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
4527   delayed()->mov(0, result); // annulled so i not clobbered for following
4528   ldx(inp, i, t3); // load final 8B (aligned) containing tail -> t3
4529   mov(8, t5);
4530   sub(t5, t4, t4);
4531   mov(0, result); // ** i clobbered at this point
4532   sll(t4, 3, t4); // bits beyond end of string -> t4
4533   srlx(t3, t4, t3); // bytes beyond end now obliterated -> t3
4534   andcc(lmask, t3, G0);
4535   movcc(Assembler::notZero, false, xcc, 1, result);
4536   bind(Lreturn);
4537 }
4538
4539 #endif
4540
4541
4542 // Use BIS for zeroing (count is in bytes).
4543 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4544   assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
4545   Register end = count;
4546   int cache_line_size = VM_Version::prefetch_data_size();
4547   assert(cache_line_size > 0, "cache line size should be known for this code");
4548   // Minimum count when BIS zeroing can be used since
4549   // it needs membar which is expensive.
4550   int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
4551
4552   Label small_loop;
4553   // Check if count is negative (dead code) or zero.
4554   // Note, count uses 64bit in 64 bit VM.
4555   cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
4556
4557   // Use BIS zeroing only for big arrays since it requires membar.
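// The overall shape of the fast path is (a sketch; BIS = Block Initializing
// Store, the stxa with ASI_ST_BLKINIT_PRIMARY below):
//
//   zero up to the next cache-line boundary with plain stx;
//   while (more than two cache lines remain)
//     BIS-store a whole zeroed cache line;   // no read-for-ownership traffic
//   membar(StoreLoad);                       // BIS stores need this fence
//   zero the tail with plain stx;
//
// The last two cache lines are excluded from BIS so that initializing a full
// line can never clobber the header of an object placed right after 'end'.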
4558 if (Assembler::is_simm13(block_zero_size)) { // < 4096 4559 cmp(count, block_zero_size); 4560 } else { 4561 set(block_zero_size, temp); 4562 cmp(count, temp); 4563 } 4564 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop); 4565 delayed()->add(to, count, end); 4566 4567 // Note: size is >= three (32 bytes) cache lines. 4568 4569 // Clean the beginning of space up to next cache line. 4570 for (int offs = 0; offs < cache_line_size; offs += 8) { 4571 stx(G0, to, offs); 4572 } 4573 4574 // align to next cache line 4575 add(to, cache_line_size, to); 4576 and3(to, -cache_line_size, to); 4577 4578 // Note: size left >= two (32 bytes) cache lines. 4579 4580 // BIS should not be used to zero tail (64 bytes) 4581 // to avoid zeroing a header of the following object. 4582 sub(end, (cache_line_size*2)-8, end); 4583 4584 Label bis_loop; 4585 bind(bis_loop); 4586 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 4587 add(to, cache_line_size, to); 4588 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop); 4589 4590 // BIS needs membar. 4591 membar(Assembler::StoreLoad); 4592 4593 add(end, (cache_line_size*2)-8, end); // restore end 4594 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone); 4595 4596 // Clean the tail. 4597 bind(small_loop); 4598 stx(G0, to, 0); 4599 add(to, 8, to); 4600 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop); 4601 nop(); // Separate short branches 4602 } 4603 4604 /** 4605 * Update CRC-32[C] with a byte value according to constants in table 4606 * 4607 * @param [in,out]crc Register containing the crc. 4608 * @param [in]val Register containing the byte to fold into the CRC. 4609 * @param [in]table Register containing the table of crc constants. 
4610 * 4611 * uint32_t crc; 4612 * val = crc_table[(val ^ crc) & 0xFF]; 4613 * crc = val ^ (crc >> 8); 4614 */ 4615 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4616 xor3(val, crc, val); 4617 and3(val, 0xFF, val); 4618 sllx(val, 2, val); 4619 lduw(table, val, val); 4620 srlx(crc, 8, crc); 4621 xor3(val, crc, crc); 4622 } 4623 4624 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4625 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4626 srlx(src, 24, dst); 4627 4628 sllx(src, 32+8, tmp); 4629 srlx(tmp, 32+24, tmp); 4630 sllx(tmp, 8, tmp); 4631 or3(dst, tmp, dst); 4632 4633 sllx(src, 32+16, tmp); 4634 srlx(tmp, 32+24, tmp); 4635 sllx(tmp, 16, tmp); 4636 or3(dst, tmp, dst); 4637 4638 sllx(src, 32+24, tmp); 4639 srlx(tmp, 32, tmp); 4640 or3(dst, tmp, dst); 4641 } 4642 4643 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4644 reverse_bytes_32(src, tmp1, tmp2); 4645 movxtod(tmp1, dst); 4646 } 4647 4648 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4649 movdtox(src, tmp1); 4650 reverse_bytes_32(tmp1, dst, tmp2); 4651 } 4652 4653 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4654 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4655 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4656 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4657 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4658 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4659 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4660 ldxl(buf, G0, xtmp_lo); 4661 inc(buf, 8); 4662 ldxl(buf, G0, xtmp_hi); 4663 inc(buf, 8); 4664 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4665 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4666 } 4667 4668 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4669 mov(xcrc_lo, xtmp_lo); 4670 mov(xcrc_hi, xtmp_hi); 4671 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4672 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4673 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4674 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4675 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4676 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4677 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4678 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4679 } 4680 4681 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4682 and3(xcrc, 0xFF, tmp); 4683 sllx(tmp, 2, tmp); 4684 lduw(table, tmp, xtmp); 4685 srlx(xcrc, 8, xcrc); 4686 xor3(xtmp, xcrc, xcrc); 4687 } 4688 4689 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4690 and3(crc, 0xFF, tmp); 4691 srlx(crc, 8, crc); 4692 sllx(tmp, 2, tmp); 4693 lduw(table, tmp, tmp); 4694 xor3(tmp, crc, crc); 4695 } 4696 4697 #define CRC32_TMP_REG_NUM 18 4698 4699 #define CRC32_CONST_64 0x163cd6124 4700 #define CRC32_CONST_96 0x0ccaa009e 4701 #define CRC32_CONST_160 0x1751997d0 4702 #define CRC32_CONST_480 0x1c6e41596 4703 #define CRC32_CONST_544 0x154442bd4 4704 4705 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4706 4707 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4708 Label L_main_loop_prologue; 4709 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4710 Label L_fold_tail, L_fold_tail_loop; 4711 Label L_8byte_fold_loop, L_8byte_fold_check; 4712 4713 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};
4714
4715   Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
4716   Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
4717   Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
4718   Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
4719   Register const_544 = tmp[CRC32_TMP_REG_NUM-2];
4720
4721   set(ExternalAddress(StubRoutines::crc_table_addr()), table);
4722
4723   not1(crc); // ~c
4724   clruwu(crc); // clear upper 32 bits of crc
4725
4726   // Check if below cutoff, proceed directly to cleanup code
4727   mov(31, G4);
4728   cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
4729
4730   // Align buffer to an 8-byte boundary
4731   mov(8, O5);
4732   and3(buf, 0x7, O4);
4733   sub(O5, O4, O5);
4734   and3(O5, 0x7, O5);
4735   sub(len, O5, len);
4736   ba(L_align_check);
4737   delayed()->nop();
4738
4739   // Alignment loop, table look up method for up to 7 bytes
4740   bind(L_align_loop);
4741   ldub(buf, 0, O4);
4742   inc(buf);
4743   dec(O5);
4744   xor3(O4, crc, O4);
4745   and3(O4, 0xFF, O4);
4746   sllx(O4, 2, O4);
4747   lduw(table, O4, O4);
4748   srlx(crc, 8, crc);
4749   xor3(O4, crc, crc);
4750   bind(L_align_check);
4751   nop();
4752   cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);
4753
4754   // Aligned on a 64-bit (8-byte) boundary at this point
4755   // Check if still above cutoff (31 bytes)
4756   mov(31, G4);
4757   cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
4758   // At least 32 bytes left to process
4759
4760   // Free up registers by storing them to FP registers
4761   for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
4762     movxtod(tmp[i], as_FloatRegister(2*i));
4763   }
4764
4765   // Determine which loop to enter
4766   // Shared prologue
4767   ldxl(buf, G0, tmp[0]);
4768   inc(buf, 8);
4769   ldxl(buf, G0, tmp[1]);
4770   inc(buf, 8);
4771   xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
4772   and3(crc, 0, crc); // Clear out the crc register
4773   // Main loop needs at least 128 bytes
4774   mov(128, G4);
4775   mov(64, tmp[2]);
4776   cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
4777   // Less than 64 bytes
4778   nop();
4779   cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
4780   // Between 64 and 127 bytes
4781   set64(CRC32_CONST_96, const_96, tmp[8]);
4782   set64(CRC32_CONST_160, const_160, tmp[9]);
4783   fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
4784   fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
4785   fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
4786   dec(len, 48);
4787   ba(L_fold_tail);
4788   delayed()->nop();
4789
4790   bind(L_main_loop_prologue);
4791   for (int i = 2; i < 8; i++) {
4792     ldxl(buf, G0, tmp[i]);
4793     inc(buf, 8);
4794   }
4795
4796   // Fold total 512 bits of polynomial on each iteration,
4797   // 128 bits per each of 4 parallel streams
4798   set64(CRC32_CONST_480, const_480, tmp[8]);
4799   set64(CRC32_CONST_544, const_544, tmp[9]);
4800
4801   mov(128, G4);
4802   bind(L_fold_512b_loop);
4803   fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9], tmp[8], buf, 0);
4804   fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
4805   fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
4806   fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
4807   dec(len, 64);
4808   cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt,
L_fold_512b_loop); 4809 4810 // Fold 512 bits to 128 bits 4811 bind(L_fold_512b); 4812 set64(CRC32_CONST_96, const_96, tmp[8]); 4813 set64(CRC32_CONST_160, const_160, tmp[9]); 4814 4815 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]); 4816 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]); 4817 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]); 4818 dec(len, 48); 4819 4820 // Fold the rest of 128 bits data chunks 4821 bind(L_fold_tail); 4822 mov(32, G4); 4823 cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b); 4824 4825 set64(CRC32_CONST_96, const_96, tmp[8]); 4826 set64(CRC32_CONST_160, const_160, tmp[9]); 4827 4828 bind(L_fold_tail_loop); 4829 fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0); 4830 sub(len, 16, len); 4831 cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop); 4832 4833 // Fold the 128 bits in tmps 0 - 1 into tmp 1 4834 bind(L_fold_128b); 4835 4836 set64(CRC32_CONST_64, const_64, tmp[4]); 4837 4838 xmulx(const_64, tmp[0], tmp[2]); 4839 xmulxhi(const_64, tmp[0], tmp[3]); 4840 4841 srl(tmp[2], G0, tmp[4]); 4842 xmulx(const_64, tmp[4], tmp[4]); 4843 4844 srlx(tmp[2], 32, tmp[2]); 4845 sllx(tmp[3], 32, tmp[3]); 4846 or3(tmp[2], tmp[3], tmp[2]); 4847 4848 xor3(tmp[4], tmp[1], tmp[4]); 4849 xor3(tmp[4], tmp[2], tmp[1]); 4850 dec(len, 8); 4851 4852 // Use table lookup for the 8 bytes left in tmp[1] 4853 dec(len, 8); 4854 4855 // 8 8-bit folds to compute 32-bit CRC. 4856 for (int j = 0; j < 4; j++) { 4857 fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]); 4858 } 4859 srl(tmp[1], G0, crc); // move 32 bits to general register 4860 for (int j = 0; j < 4; j++) { 4861 fold_8bit_crc32(crc, table, tmp[3]); 4862 } 4863 4864 bind(L_8byte_fold_check); 4865 4866 // Restore int registers saved in FP registers 4867 for (int i = 0; i < CRC32_TMP_REG_NUM; i++) { 4868 movdtox(as_FloatRegister(2*i), tmp[i]); 4869 } 4870 4871 ba(L_cleanup_check); 4872 delayed()->nop(); 4873 4874 // Table look-up method for the remaining few bytes 4875 bind(L_cleanup_loop); 4876 ldub(buf, 0, O4); 4877 inc(buf); 4878 dec(len); 4879 xor3(O4, crc, O4); 4880 and3(O4, 0xFF, O4); 4881 sllx(O4, 2, O4); 4882 lduw(table, O4, O4); 4883 srlx(crc, 8, crc); 4884 xor3(O4, crc, crc); 4885 bind(L_cleanup_check); 4886 nop(); 4887 cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop); 4888 4889 not1(crc); 4890 } 4891 4892 #define CHUNK_LEN 128 /* 128 x 8B = 1KB */ 4893 #define CHUNK_K1 0x1307a0206 /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */ 4894 #define CHUNK_K2 0x1a0f717c4 /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */ 4895 #define CHUNK_K3 0x0170076fa /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */ 4896 4897 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) { 4898 4899 Label L_crc32c_head, L_crc32c_aligned; 4900 Label L_crc32c_parallel, L_crc32c_parallel_loop; 4901 Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop; 4902 Label L_crc32c_done, L_crc32c_tail, L_crc32c_return; 4903 4904 set(ExternalAddress(StubRoutines::crc32c_table_addr()), table); 4905 4906 cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return); 4907 4908 // clear upper 32 bits of crc 4909 clruwu(crc); 4910 4911 and3(buf, 7, G4); 4912 cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, 
#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);

  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of the lower 32 bits to big endian, and move to the FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14);  // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of the 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via the stack, and move to the FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2);  // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
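  // Unlike the four-stream loop above, the 32B loop below is a plain unroll:
  // each crc32c consumes the F0 produced by the previous one, so the four
  // steps per iteration are serially dependent and only the loop overhead
  // is amortized.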
  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
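// For sanity-checking kernel_crc32c, a byte-at-a-time CRC32C (Castagnoli
// polynomial, reflected form 0x82F63B78) makes a convenient reference.
// Illustrative sketch only, not compiled here; note that kernel_crc32c
// neither pre- nor post-inverts crc, so the inversions below belong to the
// caller's convention:
//
//   static juint ref_crc32c(juint crc, const jubyte* buf, size_t len) {
//     crc = ~crc;                        // caller-side pre-inversion
//     while (len--) {
//       crc ^= *buf++;                   // fold in one byte
//       for (int k = 0; k < 8; k++) {    // eight 1-bit reduction steps
//         crc = (crc >> 1) ^ (0x82F63B78u & (0 - (crc & 1)));
//       }
//     }
//     return ~crc;                       // caller-side post-inversion
//   }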