/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
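// (The wdisp/inv_wdisp helpers work in word displacements: SPARC instructions
// are 4 bytes, so the encoded 19/22/30-bit field holds (dest - pos) scaled
// down by 4, and is sign-extended and rescaled on the way back out.)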
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19);  break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19);  break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case br_op2:     r = inv_wdisp(inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}


void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
    // Armed page has poll bit set.
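    // A non-zero result of the AND below therefore means the poll is armed,
    // and we take the slow path.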
    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
    br_notnull(temp_reg, a, Assembler::pn, slow_path);
  } else {
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    load_contents(sync_state, temp_reg);
    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
    br(Assembler::notEqual, a, Assembler::pn, slow_path);
  }
}

void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.
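  // (VM calls in this file pass noreg here -- see call_VM_base below -- so for
  // those only the anchor's sp is recorded and the pc slot stays NULL.)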

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.
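// (Illustrative sketch only, with made-up register choices: a stub that has
// already done its own save_frame(0) would call, e.g.,
//   call_VM(Oresult, FP, entry_point, Oarg, true);
// so the frame anchor records the caller's Java frame rather than the stub's.)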

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

#ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
#endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
  srlx(obj, CardTableModRefBS::card_shift, obj);
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}


void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
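  // The sequence above emits at most 7 instructions (insts_for_sethi below
  // returns 7 for the worst case), so padding to 7 * BytesPerInstWord gives
  // patching code a fixed-size target.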
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
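  // Case analysis, with illustrative (not source-derived) example values:
  //   value fits simm13 (e.g. 0x123)        -> single or3
  //   hi == 0 (e.g. 0x7fffffff)             -> sethi [+ or3]
  //   bits 63..34 clear (e.g. 0x300000000)  -> sethi of bits 33..2, sllx 2 [+ or3]
  //   hi == -1 (e.g. -2)                    -> sethi of ~lo, xor3
  //   lo == 0 (e.g. 0x500000000)            -> build hi, then sllx 32
  //   otherwise                             -> build halves in tmp and d, merge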
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);          // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);         // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);        // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);          // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue appropriate save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
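  // Concretely: the value is st_ptr'd into the register's own window-save
  // slot, exactly where a window-spill trap would put it, and is reloaded
  // (or renamed via after_save) once the new window is active.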
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}

// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.
// Must not smash ANY registers, including flags.  May not use a register
// 'save', as this blows the high bits of the O-regs if they contain Long
// values.  Acts as a 'leaf' call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl(); // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail); // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();   // make stop actually stop rather than writing
                       // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  char* b = new char[1024];
  jio_snprintf(b, 1024, "unimplemented: %s", what);
  stop(b);
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();   // one more save to free up another O7 register
  mov(I0, O1);    // addr of reg save area

  // We expect pointer to message in I1.
  // Caller must set it up in O1.
  mov(I1, O0); // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get the locks to work, we need to fake an in_VM state.
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}


void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras); // if neg, clear reg
  delayed()->set(0, Rresult);        // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}

// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label& L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}

// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);              // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low); // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(                               -1, Rresult);
  movcc(equal,           false, icc,  0, Rresult);
  movcc(greaterUnsigned, false, icc,  1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high,  Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
         "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3(Rcount, 0x3f, Rcount);     // take least significant 6 bits
  subcc(Rcount, 31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.
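  // Worked example: for count == 1, Ralt_count ends up as 31-1 == 30, so the
  // srl below plus the extra srl-by-1 shift Rin_low right by 31 in total,
  // leaving just its top bit as the carry into Rout_high. For count == 0 the
  // combined shift is 32, which the two-step form handles correctly
  // (result 0) where a single srl by 32 would not.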
1710 1711 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count 1712 if (Rcount != Rout_low) { 1713 sll(Rin_low, Rcount, Rout_low); // low half 1714 } 1715 sll(Rin_high, Rcount, Rout_high); 1716 if (Rcount == Rout_low) { 1717 sll(Rin_low, Rcount, Rout_low); // low half 1718 } 1719 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more 1720 ba(done); 1721 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low 1722 1723 // shift >= 32 bits, Ralt_count = Rcount-32 1724 bind(big_shift); 1725 sll(Rin_low, Ralt_count, Rout_high ); 1726 clr(Rout_low); 1727 1728 bind(done); 1729 } 1730 1731 1732 void MacroAssembler::lshr( Register Rin_high, Register Rin_low, 1733 Register Rcount, 1734 Register Rout_high, Register Rout_low, 1735 Register Rtemp ) { 1736 1737 Register Ralt_count = Rtemp; 1738 Register Rxfer_bits = Rtemp; 1739 1740 assert( Ralt_count != Rin_high 1741 && Ralt_count != Rin_low 1742 && Ralt_count != Rcount 1743 && Rxfer_bits != Rin_low 1744 && Rxfer_bits != Rin_high 1745 && Rxfer_bits != Rcount 1746 && Rxfer_bits != Rout_high 1747 && Rout_high != Rin_low, 1748 "register alias checks"); 1749 1750 Label big_shift, done; 1751 1752 // This code can be optimized to use the 64 bit shifts in V9. 1753 // Here we use the 32 bit shifts. 1754 1755 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1756 subcc(Rcount, 31, Ralt_count); 1757 br(greater, true, pn, big_shift); 1758 delayed()->dec(Ralt_count); 1759 1760 // shift < 32 bits, Ralt_count = Rcount-31 1761 1762 // We get the transfer bits by shifting left by 32-count the high 1763 // register. This is done by shifting left by 31-count and then by one 1764 // more to take care of the special (rare) case where count is zero 1765 // (shifting by 32 would not work). 1766 1767 neg(Ralt_count); 1768 if (Rcount != Rout_low) { 1769 srl(Rin_low, Rcount, Rout_low); 1770 } 1771 1772 // The order of the next two instructions is critical in the case where 1773 // Rin and Rout are the same and should not be reversed. 1774 1775 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1776 sra(Rin_high, Rcount, Rout_high ); // high half 1777 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1778 if (Rcount == Rout_low) { 1779 srl(Rin_low, Rcount, Rout_low); 1780 } 1781 ba(done); 1782 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1783 1784 // shift >= 32 bits, Ralt_count = Rcount-32 1785 bind(big_shift); 1786 1787 sra(Rin_high, Ralt_count, Rout_low); 1788 sra(Rin_high, 31, Rout_high); // sign into hi 1789 1790 bind( done ); 1791 } 1792 1793 1794 1795 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1796 Register Rcount, 1797 Register Rout_high, Register Rout_low, 1798 Register Rtemp ) { 1799 1800 Register Ralt_count = Rtemp; 1801 Register Rxfer_bits = Rtemp; 1802 1803 assert( Ralt_count != Rin_high 1804 && Ralt_count != Rin_low 1805 && Ralt_count != Rcount 1806 && Rxfer_bits != Rin_low 1807 && Rxfer_bits != Rin_high 1808 && Rxfer_bits != Rcount 1809 && Rxfer_bits != Rout_high 1810 && Rout_high != Rin_low, 1811 "register alias checks"); 1812 1813 Label big_shift, done; 1814 1815 // This code can be optimized to use the 64 bit shifts in V9. 1816 // Here we use the 32 bit shifts. 
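// As an illustrative sketch (not emitted code), for count in [1,31] this
// routine computes
//   out_lo = (in_lo >>> count) | (in_hi << (32 - count));  // >>> = logical
//   out_hi =  in_hi >>> count;                             // zero-fill
// and for count in [32,63] the big_shift path computes
//   out_lo = in_hi >>> (count - 32);  out_hi = 0;
// The (32 - count) left shift is again split into (31 - count) and 1 to
// sidestep the undefined shift by 32 when count == 0.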
1817 1818 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1819 subcc(Rcount, 31, Ralt_count); 1820 br(greater, true, pn, big_shift); 1821 delayed()->dec(Ralt_count); 1822 1823 // shift < 32 bits, Ralt_count = Rcount-31 1824 1825 // We get the transfer bits by shifting left by 32-count the high 1826 // register. This is done by shifting left by 31-count and then by one 1827 // more to take care of the special (rare) case where count is zero 1828 // (shifting by 32 would not work). 1829 1830 neg(Ralt_count); 1831 if (Rcount != Rout_low) { 1832 srl(Rin_low, Rcount, Rout_low); 1833 } 1834 1835 // The order of the next two instructions is critical in the case where 1836 // Rin and Rout are the same and should not be reversed. 1837 1838 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1839 srl(Rin_high, Rcount, Rout_high ); // high half 1840 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1841 if (Rcount == Rout_low) { 1842 srl(Rin_low, Rcount, Rout_low); 1843 } 1844 ba(done); 1845 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1846 1847 // shift >= 32 bits, Ralt_count = Rcount-32 1848 bind(big_shift); 1849 1850 srl(Rin_high, Ralt_count, Rout_low); 1851 clr(Rout_high); 1852 1853 bind( done ); 1854 } 1855 1856 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1857 cmp(Ra, Rb); 1858 mov(-1, Rresult); 1859 movcc(equal, false, xcc, 0, Rresult); 1860 movcc(greater, false, xcc, 1, Rresult); 1861 } 1862 1863 1864 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1865 switch (size_in_bytes) { 1866 case 8: ld_long(src, dst); break; 1867 case 4: ld( src, dst); break; 1868 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1869 case 1: is_signed ? 
ldsb(src, dst) : ldub(src, dst); break; 1870 default: ShouldNotReachHere(); 1871 } 1872 } 1873 1874 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1875 switch (size_in_bytes) { 1876 case 8: st_long(src, dst); break; 1877 case 4: st( src, dst); break; 1878 case 2: sth( src, dst); break; 1879 case 1: stb( src, dst); break; 1880 default: ShouldNotReachHere(); 1881 } 1882 } 1883 1884 1885 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1886 FloatRegister Fa, FloatRegister Fb, 1887 Register Rresult) { 1888 if (is_float) { 1889 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1890 } else { 1891 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1892 } 1893 1894 if (unordered_result == 1) { 1895 mov( -1, Rresult); 1896 movcc(f_equal, true, fcc0, 0, Rresult); 1897 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1898 } else { 1899 mov( -1, Rresult); 1900 movcc(f_equal, true, fcc0, 0, Rresult); 1901 movcc(f_greater, true, fcc0, 1, Rresult); 1902 } 1903 } 1904 1905 1906 void MacroAssembler::save_all_globals_into_locals() { 1907 mov(G1,L1); 1908 mov(G2,L2); 1909 mov(G3,L3); 1910 mov(G4,L4); 1911 mov(G5,L5); 1912 mov(G6,L6); 1913 mov(G7,L7); 1914 } 1915 1916 void MacroAssembler::restore_globals_from_locals() { 1917 mov(L1,G1); 1918 mov(L2,G2); 1919 mov(L3,G3); 1920 mov(L4,G4); 1921 mov(L5,G5); 1922 mov(L6,G6); 1923 mov(L7,G7); 1924 } 1925 1926 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1927 Register tmp, 1928 int offset) { 1929 intptr_t value = *delayed_value_addr; 1930 if (value != 0) 1931 return RegisterOrConstant(value + offset); 1932 1933 // load indirectly to solve generation ordering problem 1934 AddressLiteral a(delayed_value_addr); 1935 load_ptr_contents(a, tmp); 1936 1937 #ifdef ASSERT 1938 tst(tmp); 1939 breakpoint_trap(zero, xcc); 1940 #endif 1941 1942 if (offset != 0) 1943 add(tmp, offset, tmp); 1944 1945 return RegisterOrConstant(tmp); 1946 } 1947 1948 1949 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1950 assert(d.register_or_noreg() != G0, "lost side effect"); 1951 if ((s2.is_constant() && s2.as_constant() == 0) || 1952 (s2.is_register() && s2.as_register() == G0)) { 1953 // Do nothing, just move value. 1954 if (s1.is_register()) { 1955 if (d.is_constant()) d = temp; 1956 mov(s1.as_register(), d.as_register()); 1957 return d; 1958 } else { 1959 return s1; 1960 } 1961 } 1962 1963 if (s1.is_register()) { 1964 assert_different_registers(s1.as_register(), temp); 1965 if (d.is_constant()) d = temp; 1966 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1967 return d; 1968 } else { 1969 if (s2.is_register()) { 1970 assert_different_registers(s2.as_register(), temp); 1971 if (d.is_constant()) d = temp; 1972 set(s1.as_constant(), temp); 1973 andn(temp, s2.as_register(), d.as_register()); 1974 return d; 1975 } else { 1976 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1977 return res; 1978 } 1979 } 1980 } 1981 1982 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1983 assert(d.register_or_noreg() != G0, "lost side effect"); 1984 if ((s2.is_constant() && s2.as_constant() == 0) || 1985 (s2.is_register() && s2.as_register() == G0)) { 1986 // Do nothing, just move value. 
1987 if (s1.is_register()) { 1988 if (d.is_constant()) d = temp; 1989 mov(s1.as_register(), d.as_register()); 1990 return d; 1991 } else { 1992 return s1; 1993 } 1994 } 1995 1996 if (s1.is_register()) { 1997 assert_different_registers(s1.as_register(), temp); 1998 if (d.is_constant()) d = temp; 1999 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2000 return d; 2001 } else { 2002 if (s2.is_register()) { 2003 assert_different_registers(s2.as_register(), temp); 2004 if (d.is_constant()) d = temp; 2005 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2006 return d; 2007 } else { 2008 intptr_t res = s1.as_constant() + s2.as_constant(); 2009 return res; 2010 } 2011 } 2012 } 2013 2014 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2015 assert(d.register_or_noreg() != G0, "lost side effect"); 2016 if (!is_simm13(s2.constant_or_zero())) 2017 s2 = (s2.as_constant() & 0xFF); 2018 if ((s2.is_constant() && s2.as_constant() == 0) || 2019 (s2.is_register() && s2.as_register() == G0)) { 2020 // Do nothing, just move value. 2021 if (s1.is_register()) { 2022 if (d.is_constant()) d = temp; 2023 mov(s1.as_register(), d.as_register()); 2024 return d; 2025 } else { 2026 return s1; 2027 } 2028 } 2029 2030 if (s1.is_register()) { 2031 assert_different_registers(s1.as_register(), temp); 2032 if (d.is_constant()) d = temp; 2033 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2034 return d; 2035 } else { 2036 if (s2.is_register()) { 2037 assert_different_registers(s2.as_register(), temp); 2038 if (d.is_constant()) d = temp; 2039 set(s1.as_constant(), temp); 2040 sll_ptr(temp, s2.as_register(), d.as_register()); 2041 return d; 2042 } else { 2043 intptr_t res = s1.as_constant() << s2.as_constant(); 2044 return res; 2045 } 2046 } 2047 } 2048 2049 2050 // Look up the method for a megamorphic invokeinterface call. 2051 // The target method is determined by <intf_klass, itable_index>. 2052 // The receiver klass is in recv_klass. 2053 // On success, the result will be in method_result, and execution falls through. 2054 // On failure, execution transfers to the given label. 2055 void MacroAssembler::lookup_interface_method(Register recv_klass, 2056 Register intf_klass, 2057 RegisterOrConstant itable_index, 2058 Register method_result, 2059 Register scan_temp, 2060 Register sethi_temp, 2061 Label& L_no_such_interface, 2062 bool return_method) { 2063 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2064 assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result, 2065 "caller must use same register for non-constant itable index as for method"); 2066 2067 Label L_no_such_interface_restore; 2068 bool did_save = false; 2069 if (scan_temp == noreg || sethi_temp == noreg) { 2070 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2071 Register intf_2 = intf_klass->is_global() ? 
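// (G0 is hardwired to zero on SPARC, so an s2 of G0 is statically zero:
// no add is emitted; a register s1 is just moved and a constant s1 is
// returned unchanged.)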
intf_klass : L1; 2072 assert(method_result->is_global(), "must be able to return value"); 2073 scan_temp = L2; 2074 sethi_temp = L3; 2075 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2076 recv_klass = recv_2; 2077 intf_klass = intf_2; 2078 did_save = true; 2079 } 2080 2081 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2082 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2083 int scan_step = itableOffsetEntry::size() * wordSize; 2084 int vte_size = vtableEntry::size_in_bytes(); 2085 2086 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2087 // %%% We should store the aligned, prescaled offset in the klassoop. 2088 // Then the next several instructions would fold away. 2089 2090 int itb_offset = vtable_base; 2091 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2092 sll(scan_temp, itb_scale, scan_temp); 2093 add(scan_temp, itb_offset, scan_temp); 2094 add(recv_klass, scan_temp, scan_temp); 2095 2096 if (return_method) { 2097 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2098 RegisterOrConstant itable_offset = itable_index; 2099 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2100 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2101 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2102 } 2103 2104 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2105 // if (scan->interface() == intf) { 2106 // result = (klass + scan->offset() + itable_index); 2107 // } 2108 // } 2109 Label L_search, L_found_method; 2110 2111 for (int peel = 1; peel >= 0; peel--) { 2112 // %%%% Could load both offset and interface in one ldx, if they were 2113 // in the opposite order. This would save a load. 2114 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2115 2116 // Check that this entry is non-null. A null entry means that 2117 // the receiver class doesn't implement the interface, and wasn't the 2118 // same as when the caller was compiled. 2119 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2120 delayed()->cmp(method_result, intf_klass); 2121 2122 if (peel) { 2123 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2124 } else { 2125 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2126 // (invert the test to fall through to found_method...) 2127 } 2128 delayed()->add(scan_temp, scan_step, scan_temp); 2129 2130 if (!peel) break; 2131 2132 bind(L_search); 2133 } 2134 2135 bind(L_found_method); 2136 2137 if (return_method) { 2138 // Got a hit. 
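// The matching itableOffsetEntry is one scan_step behind scan_temp; its
// offset field holds the byte offset of this interface's method table
// within the receiver klass. Since recv_klass was pre-adjusted by the
// scaled itable_index above, one lduw plus one ld_ptr yields the Method*.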
2139 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2140 // scan_temp[-scan_step] points to the vtable offset we need 2141 ito_offset -= scan_step; 2142 lduw(scan_temp, ito_offset, scan_temp); 2143 ld_ptr(recv_klass, scan_temp, method_result); 2144 } 2145 2146 if (did_save) { 2147 Label L_done; 2148 ba(L_done); 2149 delayed()->restore(); 2150 2151 bind(L_no_such_interface_restore); 2152 ba(L_no_such_interface); 2153 delayed()->restore(); 2154 2155 bind(L_done); 2156 } 2157 } 2158 2159 2160 // virtual method calling 2161 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2162 RegisterOrConstant vtable_index, 2163 Register method_result) { 2164 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2165 Register sethi_temp = method_result; 2166 const int base = in_bytes(Klass::vtable_start_offset()) + 2167 // method pointer offset within the vtable entry: 2168 vtableEntry::method_offset_in_bytes(); 2169 RegisterOrConstant vtable_offset = vtable_index; 2170 // Each of the following three lines potentially generates an instruction. 2171 // But the total number of address formation instructions will always be 2172 // at most two, and will often be zero. In any case, it will be optimal. 2173 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2174 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2175 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2176 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2177 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2178 ld_ptr(vtable_entry_addr, method_result); 2179 } 2180 2181 2182 void MacroAssembler::check_klass_subtype(Register sub_klass, 2183 Register super_klass, 2184 Register temp_reg, 2185 Register temp2_reg, 2186 Label& L_success) { 2187 Register sub_2 = sub_klass; 2188 Register sup_2 = super_klass; 2189 if (!sub_2->is_global()) sub_2 = L0; 2190 if (!sup_2->is_global()) sup_2 = L1; 2191 bool did_save = false; 2192 if (temp_reg == noreg || temp2_reg == noreg) { 2193 temp_reg = L2; 2194 temp2_reg = L3; 2195 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2196 sub_klass = sub_2; 2197 super_klass = sup_2; 2198 did_save = true; 2199 } 2200 Label L_failure, L_pop_to_failure, L_pop_to_success; 2201 check_klass_subtype_fast_path(sub_klass, super_klass, 2202 temp_reg, temp2_reg, 2203 (did_save ? &L_pop_to_success : &L_success), 2204 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2205 2206 if (!did_save) 2207 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2208 check_klass_subtype_slow_path(sub_2, sup_2, 2209 L2, L3, L4, L5, 2210 NULL, &L_pop_to_failure); 2211 2212 // on success: 2213 bind(L_pop_to_success); 2214 restore(); 2215 ba_short(L_success); 2216 2217 // on failure: 2218 bind(L_pop_to_failure); 2219 restore(); 2220 bind(L_failure); 2221 } 2222 2223 2224 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2225 Register super_klass, 2226 Register temp_reg, 2227 Register temp2_reg, 2228 Label* L_success, 2229 Label* L_failure, 2230 Label* L_slow_path, 2231 RegisterOrConstant super_check_offset) { 2232 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2233 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2234 2235 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2236 bool need_slow_path = (must_load_sco || 2237 super_check_offset.constant_or_zero() == sco_offset); 2238 2239 assert_different_registers(sub_klass, super_klass, temp_reg); 2240 if (super_check_offset.is_register()) { 2241 assert_different_registers(sub_klass, super_klass, temp_reg, 2242 super_check_offset.as_register()); 2243 } else if (must_load_sco) { 2244 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2245 } 2246 2247 Label L_fallthrough; 2248 int label_nulls = 0; 2249 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2250 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2251 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2252 assert(label_nulls <= 1 || 2253 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2254 "at most one NULL in the batch, usually"); 2255 2256 // If the pointers are equal, we are done (e.g., String[] elements). 2257 // This self-check enables sharing of secondary supertype arrays among 2258 // non-primary types such as array-of-interface. Otherwise, each such 2259 // type would need its own customized SSA. 2260 // We move this check to the front of the fast path because many 2261 // type checks are in fact trivially successful in this manner, 2262 // so we get a nicely predicted branch right at the start of the check. 2263 cmp(super_klass, sub_klass); 2264 brx(Assembler::equal, false, Assembler::pn, *L_success); 2265 delayed()->nop(); 2266 2267 // Check the supertype display: 2268 if (must_load_sco) { 2269 // The super check offset is always positive... 2270 lduw(super_klass, sco_offset, temp2_reg); 2271 super_check_offset = RegisterOrConstant(temp2_reg); 2272 // super_check_offset is register. 2273 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2274 } 2275 ld_ptr(sub_klass, super_check_offset, temp_reg); 2276 cmp(super_klass, temp_reg); 2277 2278 // This check has worked decisively for primary supers. 2279 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2280 // (Secondary supers are interfaces and very deeply nested subtypes.) 2281 // This works in the same check above because of a tricky aliasing 2282 // between the super_cache and the primary super display elements. 2283 // (The 'super_check_addr' can address either, as the case requires.) 2284 // Note that the cache is updated below if it does not help us find 2285 // what we need immediately. 2286 // So if it was a primary super, we can just fail immediately. 
2287 // Otherwise, it's the slow path for us (no success at this point). 2288 2289 // Hacked ba(), which may only be used just before L_fallthrough. 2290 #define FINAL_JUMP(label) \ 2291 if (&(label) != &L_fallthrough) { \ 2292 ba(label); delayed()->nop(); \ 2293 } 2294 2295 if (super_check_offset.is_register()) { 2296 brx(Assembler::equal, false, Assembler::pn, *L_success); 2297 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2298 2299 if (L_failure == &L_fallthrough) { 2300 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2301 delayed()->nop(); 2302 } else { 2303 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2304 delayed()->nop(); 2305 FINAL_JUMP(*L_slow_path); 2306 } 2307 } else if (super_check_offset.as_constant() == sc_offset) { 2308 // Need a slow path; fast failure is impossible. 2309 if (L_slow_path == &L_fallthrough) { 2310 brx(Assembler::equal, false, Assembler::pt, *L_success); 2311 delayed()->nop(); 2312 } else { 2313 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2314 delayed()->nop(); 2315 FINAL_JUMP(*L_success); 2316 } 2317 } else { 2318 // No slow path; it's a fast decision. 2319 if (L_failure == &L_fallthrough) { 2320 brx(Assembler::equal, false, Assembler::pt, *L_success); 2321 delayed()->nop(); 2322 } else { 2323 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2324 delayed()->nop(); 2325 FINAL_JUMP(*L_success); 2326 } 2327 } 2328 2329 bind(L_fallthrough); 2330 2331 #undef FINAL_JUMP 2332 } 2333 2334 2335 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2336 Register super_klass, 2337 Register count_temp, 2338 Register scan_temp, 2339 Register scratch_reg, 2340 Register coop_reg, 2341 Label* L_success, 2342 Label* L_failure) { 2343 assert_different_registers(sub_klass, super_klass, 2344 count_temp, scan_temp, scratch_reg, coop_reg); 2345 2346 Label L_fallthrough, L_loop; 2347 int label_nulls = 0; 2348 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2349 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2350 assert(label_nulls <= 1, "at most one NULL in the batch"); 2351 2352 // a couple of useful fields in sub_klass: 2353 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2354 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2355 2356 // Do a linear scan of the secondary super-klass chain. 2357 // This code is rarely used, so simplicity is a virtue here. 2358 2359 #ifndef PRODUCT 2360 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2361 inc_counter((address) pst_counter, count_temp, scan_temp); 2362 #endif 2363 2364 // We will consult the secondary-super array. 2365 ld_ptr(sub_klass, ss_offset, scan_temp); 2366 2367 Register search_key = super_klass; 2368 2369 // Load the array length. (Positive movl does right thing on LP64.) 2370 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2371 2372 // Check for empty secondary super list 2373 tst(count_temp); 2374 2375 // In the array of super classes elements are pointer sized. 2376 int element_size = wordSize; 2377 2378 // Top of search loop 2379 bind(L_loop); 2380 br(Assembler::equal, false, Assembler::pn, *L_failure); 2381 delayed()->add(scan_temp, element_size, scan_temp); 2382 2383 // Skip the array header in all array accesses. 
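// For reference, the emitted loop is roughly equivalent to (sketch only):
//   for (int i = 0; i < ss->length(); i++)      // ss = secondary supers
//     if (ss->at(i) == super_klass) goto found;
//   goto failure;
// with the pointer increment and trip-count decrement folded into the
// branch delay slots.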
2384 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2385 elem_offset -= element_size; // the scan pointer was pre-incremented also 2386 2387 // Load next super to check 2388 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2389 2390 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2391 cmp(scratch_reg, search_key); 2392 2393 // A miss means we are NOT a subtype and need to keep looping 2394 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2395 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2396 2397 // Success. Cache the super we found and proceed in triumph. 2398 st_ptr(super_klass, sub_klass, sc_offset); 2399 2400 if (L_success != &L_fallthrough) { 2401 ba(*L_success); 2402 delayed()->nop(); 2403 } 2404 2405 bind(L_fallthrough); 2406 } 2407 2408 2409 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2410 Register temp_reg, 2411 int extra_slot_offset) { 2412 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2413 int stackElementSize = Interpreter::stackElementSize; 2414 int offset = extra_slot_offset * stackElementSize; 2415 if (arg_slot.is_constant()) { 2416 offset += arg_slot.as_constant() * stackElementSize; 2417 return offset; 2418 } else { 2419 assert(temp_reg != noreg, "must specify"); 2420 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2421 if (offset != 0) 2422 add(temp_reg, offset, temp_reg); 2423 return temp_reg; 2424 } 2425 } 2426 2427 2428 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2429 Register temp_reg, 2430 int extra_slot_offset) { 2431 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2432 } 2433 2434 2435 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2436 Register temp_reg, 2437 Label& done, Label* slow_case, 2438 BiasedLockingCounters* counters) { 2439 assert(UseBiasedLocking, "why call this otherwise?"); 2440 2441 if (PrintBiasedLockingStatistics) { 2442 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2443 if (counters == NULL) 2444 counters = BiasedLocking::counters(); 2445 } 2446 2447 Label cas_label; 2448 2449 // Biased locking 2450 // See whether the lock is currently biased toward our thread and 2451 // whether the epoch is still valid 2452 // Note that the runtime guarantees sufficient alignment of JavaThread 2453 // pointers to allow age to be placed into low bits 2454 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2455 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2456 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2457 2458 load_klass(obj_reg, temp_reg); 2459 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2460 or3(G2_thread, temp_reg, temp_reg); 2461 xor3(mark_reg, temp_reg, temp_reg); 2462 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); 2463 if (counters != NULL) { 2464 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2465 // Reload mark_reg as we may need it later 2466 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2467 } 2468 brx(Assembler::equal, true, Assembler::pt, done); 2469 delayed()->nop(); 2470 2471 Label try_revoke_bias; 2472 Label try_rebias; 2473 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2474 
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2475 2476 // At this point we know that the header has the bias pattern and 2477 // that we are not the bias owner in the current epoch. We need to 2478 // figure out more details about the state of the header in order to 2479 // know what operations can be legally performed on the object's 2480 // header. 2481 2482 // If the low three bits in the xor result aren't clear, that means 2483 // the prototype header is no longer biased and we have to revoke 2484 // the bias on this object. 2485 btst(markOopDesc::biased_lock_mask_in_place, temp_reg); 2486 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2487 2488 // Biasing is still enabled for this data type. See whether the 2489 // epoch of the current bias is still valid, meaning that the epoch 2490 // bits of the mark word are equal to the epoch bits of the 2491 // prototype header. (Note that the prototype header's epoch bits 2492 // only change at a safepoint.) If not, attempt to rebias the object 2493 // toward the current thread. Note that we must be absolutely sure 2494 // that the current epoch is invalid in order to do this because 2495 // otherwise the manipulations it performs on the mark word are 2496 // illegal. 2497 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); 2498 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2499 2500 // The epoch of the current bias is still valid but we know nothing 2501 // about the owner; it might be set or it might be clear. Try to 2502 // acquire the bias of the object using an atomic operation. If this 2503 // fails we will go in to the runtime to revoke the object's bias. 2504 // Note that we first construct the presumed unbiased header so we 2505 // don't accidentally blow away another thread's valid bias. 2506 delayed()->and3(mark_reg, 2507 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 2508 mark_reg); 2509 or3(G2_thread, mark_reg, temp_reg); 2510 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2511 // If the biasing toward our thread failed, this means that 2512 // another thread succeeded in biasing it toward itself and we 2513 // need to revoke that bias. The revocation will occur in the 2514 // interpreter runtime in the slow case. 2515 cmp(mark_reg, temp_reg); 2516 if (counters != NULL) { 2517 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2518 } 2519 if (slow_case != NULL) { 2520 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2521 delayed()->nop(); 2522 } 2523 ba_short(done); 2524 2525 bind(try_rebias); 2526 // At this point we know the epoch has expired, meaning that the 2527 // current "bias owner", if any, is actually invalid. Under these 2528 // circumstances _only_, we are allowed to use the current header's 2529 // value as the comparison value when doing the cas to acquire the 2530 // bias in the current epoch. In other words, we allow transfer of 2531 // the bias from one thread to another directly in this situation. 2532 // 2533 // FIXME: due to a lack of registers we currently blow away the age 2534 // bits in this situation. Should attempt to preserve them. 
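// Note: the CAS below uses the whole current mark word (mark_reg) as its
// compare value, so any concurrent change to the header - including a
// competing rebias - makes the CAS fail, and the notEqual test that
// follows then routes us to the slow path.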
2535 load_klass(obj_reg, temp_reg); 2536 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2537 or3(G2_thread, temp_reg, temp_reg); 2538 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2539 // If the biasing toward our thread failed, this means that 2540 // another thread succeeded in biasing it toward itself and we 2541 // need to revoke that bias. The revocation will occur in the 2542 // interpreter runtime in the slow case. 2543 cmp(mark_reg, temp_reg); 2544 if (counters != NULL) { 2545 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2546 } 2547 if (slow_case != NULL) { 2548 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2549 delayed()->nop(); 2550 } 2551 ba_short(done); 2552 2553 bind(try_revoke_bias); 2554 // The prototype mark in the klass doesn't have the bias bit set any 2555 // more, indicating that objects of this data type are not supposed 2556 // to be biased any more. We are going to try to reset the mark of 2557 // this object to the prototype value and fall through to the 2558 // CAS-based locking scheme. Note that if our CAS fails, it means 2559 // that another thread raced us for the privilege of revoking the 2560 // bias of this particular object, so it's okay to continue in the 2561 // normal locking code. 2562 // 2563 // FIXME: due to a lack of registers we currently blow away the age 2564 // bits in this situation. Should attempt to preserve them. 2565 load_klass(obj_reg, temp_reg); 2566 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2567 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2568 // Fall through to the normal CAS-based lock, because no matter what 2569 // the result of the above CAS, some thread must have succeeded in 2570 // removing the bias bit from the object's header. 2571 if (counters != NULL) { 2572 cmp(mark_reg, temp_reg); 2573 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2574 } 2575 2576 bind(cas_label); 2577 } 2578 2579 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2580 bool allow_delay_slot_filling) { 2581 // Check for biased locking unlock case, which is a no-op 2582 // Note: we do not have to check the thread ID for two reasons. 2583 // First, the interpreter checks for IllegalMonitorStateException at 2584 // a higher level. Second, if the bias was revoked while we held the 2585 // lock, the object could not be rebiased toward another thread, so 2586 // the bias bit would be clear. 2587 ld_ptr(mark_addr, temp_reg); 2588 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); 2589 cmp(temp_reg, markOopDesc::biased_lock_pattern); 2590 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2591 delayed(); 2592 if (!allow_delay_slot_filling) { 2593 nop(); 2594 } 2595 } 2596 2597 2598 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2599 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2600 // The code could be tightened up considerably. 2601 // 2602 // box->dhw disposition - post-conditions at DONE_LABEL. 2603 // - Successful inflated lock: box->dhw != 0. 2604 // Any non-zero value suffices. 2605 // Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark() 2606 // - Successful Stack-lock: box->dhw == mark. 2607 // box->dhw must contain the displaced mark word value 2608 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2609 // The slow-path fast_enter() and slow_enter() operators
2610 // are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
2611 // - Biased: box->dhw is undefined
2612 //
2613 // SPARC refworkload performance - specifically jetstream and scimark - is
2614 // extremely sensitive to the size of the code emitted by compiler_lock_object
2615 // and compiler_unlock_object. Critically, the key factor is code size, not path
2616 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2617 // effect).
2618
2619
2620 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2621 Register Rbox, Register Rscratch,
2622 BiasedLockingCounters* counters,
2623 bool try_bias) {
2624 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2625
2626 verify_oop(Roop);
2627 Label done ;
2628
2629 if (counters != NULL) {
2630 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2631 }
2632
2633 if (EmitSync & 1) {
2634 mov(3, Rscratch);
2635 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2636 cmp(SP, G0);
2637 return ;
2638 }
2639
2640 if (EmitSync & 2) {
2641
2642 // Fetch object's markword
2643 ld_ptr(mark_addr, Rmark);
2644
2645 if (try_bias) {
2646 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2647 }
2648
2649 // Save Rbox in Rscratch to be used for the cas operation
2650 mov(Rbox, Rscratch);
2651
2652 // set Rmark to markOop | markOopDesc::unlocked_value
2653 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2654
2655 // Initialize the box. (Must happen before we update the object mark!)
2656 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2657
2658 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2659 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2660 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2661
2662 // if compare/exchange succeeded we found an unlocked object and we now have locked it
2663 // hence we are done
2664 cmp(Rmark, Rscratch);
2665 sub(Rscratch, STACK_BIAS, Rscratch);
2666 brx(Assembler::equal, false, Assembler::pt, done);
2667 delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
2668
2669 // we did not find an unlocked object so see if this is a recursive case
2670 // sub(Rscratch, SP, Rscratch);
2671 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2672 andcc(Rscratch, 0xfffff003, Rscratch);
2673 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2674 bind (done);
2675 return ;
2676 }
2677
2678 Label Egress ;
2679
2680 if (EmitSync & 256) {
2681 Label IsInflated ;
2682
2683 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2684 // Triage: biased, stack-locked, neutral, inflated
2685 if (try_bias) {
2686 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2687 // Invariant: if control reaches this point in the emitted stream
2688 // then Rmark has not been modified.
2689 }
2690
2691 // Store mark into displaced mark field in the on-stack basic-lock "box"
2692 // Critically, this must happen before the CAS
2693 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2694 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2695 andcc(Rmark, 2, G0);
2696 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2697 delayed()->
2698
2699 // Try stack-lock acquisition.
2700 // Beware: the 1st instruction is in a delay slot 2701 mov(Rbox, Rscratch); 2702 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2703 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2704 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2705 cmp(Rmark, Rscratch); 2706 brx(Assembler::equal, false, Assembler::pt, done); 2707 delayed()->sub(Rscratch, SP, Rscratch); 2708 2709 // Stack-lock attempt failed - check for recursive stack-lock. 2710 // See the comments below about how we might remove this case. 2711 sub(Rscratch, STACK_BIAS, Rscratch); 2712 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2713 andcc(Rscratch, 0xfffff003, Rscratch); 2714 br(Assembler::always, false, Assembler::pt, done); 2715 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2716 2717 bind(IsInflated); 2718 if (EmitSync & 64) { 2719 // If m->owner != null goto IsLocked 2720 // Pessimistic form: Test-and-CAS vs CAS 2721 // The optimistic form avoids RTS->RTO cache line upgrades. 2722 ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch); 2723 andcc(Rscratch, Rscratch, G0); 2724 brx(Assembler::notZero, false, Assembler::pn, done); 2725 delayed()->nop(); 2726 // m->owner == null : it's unlocked. 2727 } 2728 2729 // Try to CAS m->owner from null to Self 2730 // Invariant: if we acquire the lock then _recursions should be 0. 2731 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2732 mov(G2_thread, Rscratch); 2733 cas_ptr(Rmark, G0, Rscratch); 2734 cmp(Rscratch, G0); 2735 // Intentional fall-through into done 2736 } else { 2737 // Aggressively avoid the Store-before-CAS penalty 2738 // Defer the store into box->dhw until after the CAS 2739 Label IsInflated, Recursive ; 2740 2741 // Anticipate CAS -- Avoid RTS->RTO upgrade 2742 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2743 2744 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2745 // Triage: biased, stack-locked, neutral, inflated 2746 2747 if (try_bias) { 2748 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2749 // Invariant: if control reaches this point in the emitted stream 2750 // then Rmark has not been modified. 2751 } 2752 andcc(Rmark, 2, G0); 2753 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2754 delayed()-> // Beware - dangling delay-slot 2755 2756 // Try stack-lock acquisition. 2757 // Transiently install BUSY (0) encoding in the mark word. 2758 // if the CAS of 0 into the mark was successful then we execute: 2759 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2760 // ST obj->mark = box -- overwrite transient 0 value 2761 // This presumes TSO, of course. 2762 2763 mov(0, Rscratch); 2764 or3(Rmark, markOopDesc::unlocked_value, Rmark); 2765 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2766 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2767 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2768 cmp(Rscratch, Rmark); 2769 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2770 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2771 if (counters != NULL) { 2772 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2773 } 2774 ba(done); 2775 delayed()->st_ptr(Rbox, mark_addr); 2776 2777 bind(Recursive); 2778 // Stack-lock attempt failed - check for recursive stack-lock. 2779 // Tests show that we can remove the recursive case with no impact 2780 // on refworkload 0.83. 
If we need to reduce the size of the code
2781 // emitted by compiler_lock_object() the recursive case is a perfect
2782 // candidate.
2783 //
2784 // A more extreme idea is to always inflate on stack-lock recursion.
2785 // This lets us eliminate the recursive checks in compiler_lock_object
2786 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2787 // A brief experiment - requiring changes to synchronizer.cpp and the
2788 // interpreter - showed a performance *increase*. In the same experiment I eliminated
2789 // the fast-path stack-lock code from the interpreter and always passed
2790 // control to the "slow" operators in synchronizer.cpp.
2791
2792 // RScratch contains the fetched obj->mark value from the failed CAS.
2793 sub(Rscratch, STACK_BIAS, Rscratch);
2794 sub(Rscratch, SP, Rscratch);
2795 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2796 andcc(Rscratch, 0xfffff003, Rscratch);
2797 if (counters != NULL) {
2798 // Accounting needs the Rscratch register
2799 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2800 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2801 ba_short(done);
2802 } else {
2803 ba(done);
2804 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2805 }
2806
2807 bind (IsInflated);
2808
2809 // Try to CAS m->owner from null to Self
2810 // Invariant: if we acquire the lock then _recursions should be 0.
2811 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2812 mov(G2_thread, Rscratch);
2813 cas_ptr(Rmark, G0, Rscratch);
2814 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success
2815 // set icc.zf : 1=success 0=failure
2816 // ST box->displaced_header = NonZero.
2817 // Any non-zero value suffices:
2818 // markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2819 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2820 // Intentional fall-through into done
2821 }
2822
2823 bind (done);
2824 }
2825
2826 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2827 Register Rbox, Register Rscratch,
2828 bool try_bias) {
2829 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2830
2831 Label done ;
2832
2833 if (EmitSync & 4) {
2834 cmp(SP, G0);
2835 return ;
2836 }
2837
2838 if (EmitSync & 8) {
2839 if (try_bias) {
2840 biased_locking_exit(mark_addr, Rscratch, done);
2841 }
2842
2843 // Test first if it is a fast recursive unlock
2844 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2845 br_null_short(Rmark, Assembler::pt, done);
2846
2847 // Check if it is still a lightweight lock; this is true if we see
2848 // the stack address of the basicLock in the markOop of the object
2849 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2850 cas_ptr(mark_addr.base(), Rbox, Rmark);
2851 ba(done);
2852 delayed()->cmp(Rbox, Rmark);
2853 bind(done);
2854 return ;
2855 }
2856
2857 // Beware ... If the aggregate size of the code emitted by CLO and CUO
2858 // is too large, performance rolls abruptly off a cliff.
2859 // This could be related to inlining policies, code cache management, or
2860 // I$ effects.
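// For orientation, the fast-path unlock emitted below is, in outline:
//   1. box->dhw == 0       => recursive stack-lock; nothing to undo, done.
//   2. mark word bit 2 set => inflated; take the 1-0 monitor-exit path.
//   3. otherwise           => stack-locked; CAS the displaced header back
//      into obj->mark (at LStacked).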
2861 Label LStacked ; 2862 2863 if (try_bias) { 2864 // TODO: eliminate redundant LDs of obj->mark 2865 biased_locking_exit(mark_addr, Rscratch, done); 2866 } 2867 2868 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2869 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2870 andcc(Rscratch, Rscratch, G0); 2871 brx(Assembler::zero, false, Assembler::pn, done); 2872 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2873 andcc(Rmark, 2, G0); 2874 brx(Assembler::zero, false, Assembler::pt, LStacked); 2875 delayed()->nop(); 2876 2877 // It's inflated 2878 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2879 // the ST of 0 into _owner which releases the lock. This prevents loads 2880 // and stores within the critical section from reordering (floating) 2881 // past the store that releases the lock. But TSO is a strong memory model 2882 // and that particular flavor of barrier is a noop, so we can safely elide it. 2883 // Note that we use 1-0 locking by default for the inflated case. We 2884 // close the resultant (and rare) race by having contended threads in 2885 // monitorenter periodically poll _owner. 2886 2887 if (EmitSync & 1024) { 2888 // Emit code to check that _owner == Self 2889 // We could fold the _owner test into subsequent code more efficiently 2890 // than using a stand-alone check, but since _owner checking is off by 2891 // default we don't bother. We also might consider predicating the 2892 // _owner==Self check on Xcheck:jni or running on a debug build. 2893 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch); 2894 orcc(Rscratch, G0, G0); 2895 brx(Assembler::notZero, false, Assembler::pn, done); 2896 delayed()->nop(); 2897 } 2898 2899 if (EmitSync & 512) { 2900 // classic lock release code absent 1-0 locking 2901 // m->Owner = null; 2902 // membar #storeload 2903 // if (m->cxq|m->EntryList) == null goto Success 2904 // if (m->succ != null) goto Success 2905 // if CAS (&m->Owner,0,Self) != 0 goto Success 2906 // goto SlowPath 2907 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2908 orcc(Rbox, G0, G0); 2909 brx(Assembler::notZero, false, Assembler::pn, done); 2910 delayed()->nop(); 2911 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2912 if (os::is_MP()) { membar(StoreLoad); } 2913 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2914 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2915 orcc(Rbox, Rscratch, G0); 2916 brx(Assembler::zero, false, Assembler::pt, done); 2917 delayed()-> 2918 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2919 andcc(Rscratch, Rscratch, G0); 2920 brx(Assembler::notZero, false, Assembler::pt, done); 2921 delayed()->andcc(G0, G0, G0); 2922 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2923 mov(G2_thread, Rscratch); 2924 cas_ptr(Rmark, G0, Rscratch); 2925 cmp(Rscratch, G0); 2926 // invert icc.zf and goto done 2927 brx(Assembler::notZero, false, Assembler::pt, done); 2928 delayed()->cmp(G0, G0); 2929 br(Assembler::always, false, Assembler::pt, done); 2930 delayed()->cmp(G0, 1); 2931 } else { 2932 // 1-0 form : avoids CAS and MEMBAR in the common case 2933 // Do not bother to ratify that m->Owner == Self. 
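// In outline, the 1-0 exit emitted below is (a descriptive sketch):
//   if (m->recursions != 0) goto SlowPath
//   m->Owner = null
//   if ((m->cxq | m->EntryList) == null) goto Success
//   membar #storeload
//   if (m->succ != null) goto Success
//   if (CAS(&m->Owner, null, Self) != null) goto Success
//   goto SlowPath   // we reacquired the lock and must exit again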
2934 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
2935 orcc(Rbox, G0, G0);
2936 brx(Assembler::notZero, false, Assembler::pn, done);
2937 delayed()->
2938 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
2939 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
2940 orcc(Rbox, Rscratch, G0);
2941 if (EmitSync & 16384) {
2942 // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
2943 // we should transfer control directly to the slow-path.
2944 // This test makes the reacquire operation below very infrequent.
2945 // The logic is equivalent to :
2946 // if (cxq|EntryList) == null : Owner=null; goto Success
2947 // if succ == null : goto SlowPath
2948 // Owner=null; membar #storeload
2949 // if succ != null : goto Success
2950 // if CAS (&m->Owner,null,Self) != null goto Success
2951 // goto SlowPath
2952 brx(Assembler::zero, true, Assembler::pt, done);
2953 delayed()->
2954 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2955 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2956 andcc(Rscratch, Rscratch, G0) ;
2957 brx(Assembler::zero, false, Assembler::pt, done);
2958 delayed()->orcc(G0, 1, G0);
2959 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2960 } else {
2961 brx(Assembler::zero, false, Assembler::pt, done);
2962 delayed()->
2963 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2964 }
2965 if (os::is_MP()) { membar(StoreLoad); }
2966 // Check that _succ is (or remains) non-zero
2967 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
2968 andcc(Rscratch, Rscratch, G0);
2969 brx(Assembler::notZero, false, Assembler::pt, done);
2970 delayed()->andcc(G0, G0, G0);
2971 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
2972 mov(G2_thread, Rscratch);
2973 cas_ptr(Rmark, G0, Rscratch);
2974 cmp(Rscratch, G0);
2975 // invert icc.zf and goto done
2976 // A slightly better v8+/v9 idiom would be the following:
2977 // movrnz Rscratch,1,Rscratch
2978 // ba done
2979 // xorcc Rscratch,1,G0
2980 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
2981 brx(Assembler::notZero, false, Assembler::pt, done);
2982 delayed()->cmp(G0, G0);
2983 br(Assembler::always, false, Assembler::pt, done);
2984 delayed()->cmp(G0, 1);
2985 }
2986
2987 bind (LStacked);
2988 // Consider: we could replace the expensive CAS in the exit
2989 // path with a simple ST of the displaced mark value fetched from
2990 // the on-stack basiclock box. That admits a race where a thread T2
2991 // in the slow lock path -- inflating with monitor M -- could race a
2992 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
2993 // More precisely T1 in the stack-lock unlock path could "stomp" the
2994 // inflated mark value M installed by T2, resulting in an orphan
2995 // object monitor M and T2 becoming stranded. We can remedy that situation
2996 // by having T2 periodically poll the object's mark word using timed wait
2997 // operations. If T2 discovers that a stomp has occurred it vacates
2998 // the monitor M and wakes any other threads stranded on the now-orphan M.
2999 // In addition the monitor scavenger, which performs deflation,
3000 // would also need to check for orphan monitors and stranded threads.
3001 //
3002 // Finally, inflation is also used when T2 needs to assign a hashCode
3003 // to O and O is stack-locked by T1.
The "stomp" race could cause 3004 // an assigned hashCode value to be lost. We can avoid that condition 3005 // and provide the necessary hashCode stability invariants by ensuring 3006 // that hashCode generation is idempotent between copying GCs. 3007 // For example we could compute the hashCode of an object O as 3008 // O's heap address XOR some high quality RNG value that is refreshed 3009 // at GC-time. The monitor scavenger would install the hashCode 3010 // found in any orphan monitors. Again, the mechanism admits a 3011 // lost-update "stomp" WAW race but detects and recovers as needed. 3012 // 3013 // A prototype implementation showed excellent results, although 3014 // the scavenger and timeout code was rather involved. 3015 3016 cas_ptr(mark_addr.base(), Rbox, Rscratch); 3017 cmp(Rbox, Rscratch); 3018 // Intentional fall through into done ... 3019 3020 bind(done); 3021 } 3022 3023 3024 3025 void MacroAssembler::print_CPU_state() { 3026 // %%%%% need to implement this 3027 } 3028 3029 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 3030 // %%%%% need to implement this 3031 } 3032 3033 void MacroAssembler::push_IU_state() { 3034 // %%%%% need to implement this 3035 } 3036 3037 3038 void MacroAssembler::pop_IU_state() { 3039 // %%%%% need to implement this 3040 } 3041 3042 3043 void MacroAssembler::push_FPU_state() { 3044 // %%%%% need to implement this 3045 } 3046 3047 3048 void MacroAssembler::pop_FPU_state() { 3049 // %%%%% need to implement this 3050 } 3051 3052 3053 void MacroAssembler::push_CPU_state() { 3054 // %%%%% need to implement this 3055 } 3056 3057 3058 void MacroAssembler::pop_CPU_state() { 3059 // %%%%% need to implement this 3060 } 3061 3062 3063 3064 void MacroAssembler::verify_tlab() { 3065 #ifdef ASSERT 3066 if (UseTLAB && VerifyOops) { 3067 Label next, next2, ok; 3068 Register t1 = L0; 3069 Register t2 = L1; 3070 Register t3 = L2; 3071 3072 save_frame(0); 3073 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3074 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 3075 or3(t1, t2, t3); 3076 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 3077 STOP("assert(top >= start)"); 3078 should_not_reach_here(); 3079 3080 bind(next); 3081 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 3082 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 3083 or3(t3, t2, t3); 3084 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 3085 STOP("assert(top <= end)"); 3086 should_not_reach_here(); 3087 3088 bind(next2); 3089 and3(t3, MinObjAlignmentInBytesMask, t3); 3090 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 3091 STOP("assert(aligned)"); 3092 should_not_reach_here(); 3093 3094 bind(ok); 3095 restore(); 3096 } 3097 #endif 3098 } 3099 3100 3101 void MacroAssembler::eden_allocate( 3102 Register obj, // result: pointer to object after successful allocation 3103 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3104 int con_size_in_bytes, // object size in bytes if known at compile time 3105 Register t1, // temp register 3106 Register t2, // temp register 3107 Label& slow_case // continuation point if fast allocation fails 3108 ){ 3109 // make sure arguments make sense 3110 assert_different_registers(obj, var_size_in_bytes, t1, t2); 3111 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 3112 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 
"object size is not multiple of alignment"); 3113 3114 if (!Universe::heap()->supports_inline_contig_alloc()) { 3115 // No allocation in the shared eden. 3116 ba(slow_case); 3117 delayed()->nop(); 3118 } else { 3119 // get eden boundaries 3120 // note: we need both top & top_addr! 3121 const Register top_addr = t1; 3122 const Register end = t2; 3123 3124 CollectedHeap* ch = Universe::heap(); 3125 set((intx)ch->top_addr(), top_addr); 3126 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 3127 ld_ptr(top_addr, delta, end); 3128 ld_ptr(top_addr, 0, obj); 3129 3130 // try to allocate 3131 Label retry; 3132 bind(retry); 3133 #ifdef ASSERT 3134 // make sure eden top is properly aligned 3135 { 3136 Label L; 3137 btst(MinObjAlignmentInBytesMask, obj); 3138 br(Assembler::zero, false, Assembler::pt, L); 3139 delayed()->nop(); 3140 STOP("eden top is not properly aligned"); 3141 bind(L); 3142 } 3143 #endif // ASSERT 3144 const Register free = end; 3145 sub(end, obj, free); // compute amount of free space 3146 if (var_size_in_bytes->is_valid()) { 3147 // size is unknown at compile time 3148 cmp(free, var_size_in_bytes); 3149 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3150 delayed()->add(obj, var_size_in_bytes, end); 3151 } else { 3152 // size is known at compile time 3153 cmp(free, con_size_in_bytes); 3154 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case 3155 delayed()->add(obj, con_size_in_bytes, end); 3156 } 3157 // Compare obj with the value at top_addr; if still equal, swap the value of 3158 // end with the value at top_addr. If not equal, read the value at top_addr 3159 // into end. 3160 cas_ptr(top_addr, obj, end); 3161 // if someone beat us on the allocation, try again, otherwise continue 3162 cmp(obj, end); 3163 brx(Assembler::notEqual, false, Assembler::pn, retry); 3164 delayed()->mov(end, obj); // nop if successfull since obj == end 3165 3166 #ifdef ASSERT 3167 // make sure eden top is properly aligned 3168 { 3169 Label L; 3170 const Register top_addr = t1; 3171 3172 set((intx)ch->top_addr(), top_addr); 3173 ld_ptr(top_addr, 0, top_addr); 3174 btst(MinObjAlignmentInBytesMask, top_addr); 3175 br(Assembler::zero, false, Assembler::pt, L); 3176 delayed()->nop(); 3177 STOP("eden top is not properly aligned"); 3178 bind(L); 3179 } 3180 #endif // ASSERT 3181 } 3182 } 3183 3184 3185 void MacroAssembler::tlab_allocate( 3186 Register obj, // result: pointer to object after successful allocation 3187 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 3188 int con_size_in_bytes, // object size in bytes if known at compile time 3189 Register t1, // temp register 3190 Label& slow_case // continuation point if fast allocation fails 3191 ){ 3192 // make sure arguments make sense 3193 assert_different_registers(obj, var_size_in_bytes, t1); 3194 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3195 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3196 3197 const Register free = t1; 3198 3199 verify_tlab(); 3200 3201 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3202 3203 // calculate amount of free space 3204 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3205 sub(free, obj, free); 3206 3207 Label done; 3208 if (var_size_in_bytes == noreg) { 3209 cmp(free, con_size_in_bytes); 3210 } else { 3211 
    cmp(free, var_size_in_bytes);
  }
  br(Assembler::less, false, Assembler::pn, slow_case);
  // calculate the new top pointer
  if (var_size_in_bytes == noreg) {
    delayed()->add(obj, con_size_in_bytes, free);
  } else {
    delayed()->add(obj, var_size_in_bytes, free);
  }

  bind(done);

#ifdef ASSERT
  // make sure new free pointer is properly aligned
  {
    Label L;
    btst(MinObjAlignmentInBytesMask, free);
    br(Assembler::zero, false, Assembler::pt, L);
    delayed()->nop();
    STOP("updated TLAB free is not properly aligned");
    bind(L);
  }
#endif // ASSERT

  // update the tlab top pointer
  st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
  verify_tlab();
}


void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
  Register top = O0;
  Register t1 = G1;
  Register t2 = G3;
  Register t3 = O1;
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  }

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);

  // calculate amount of free space
  sub(t1, top, t1);
  srl_ptr(t1, LogHeapWordSize, t1);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmp(t1, t2);

  brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
  // increment waste limit to prevent getting stuck on this slow path
  if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
    delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
  } else {
    delayed()->nop();
    // set64 does not use the temp register if the given constant fits in 32 bits,
    // so any register will do here; using G0 as the temp would silently drop
    // the upper 32 bits of a larger value.
    set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0);
    add(t2, t3, t2);
  }

  st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
  if (TLABStats) {
    // increment number of slow_allocations
    ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
  }
  ba(try_eden);
  delayed()->nop();

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
    add(t2, 1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
    // accumulate wastage
    ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
    add(t2, t1, t2);
    stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  br_null_short(top, Assembler::pn, do_refill);

  set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
  st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
  // set klass to intArrayKlass
  sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
  add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
  sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
  st(t1, top, arrayOopDesc::length_offset_in_bytes());
  set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
  ld_ptr(t2, 0, t2);
  // store klass last. concurrent GCs assume klass length is valid if
  // klass field is not null.
  store_klass(t2, top);
  verify_oop(top);

  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
  sub(top, t1, t1); // size of tlab's allocated portion
  incr_allocated_bytes(t1, t2, t3);

  // refill the tlab with an eden allocation
  bind(do_refill);
  ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
  sll_ptr(t1, LogHeapWordSize, t1);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, t3, slow_case);

  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
#ifdef ASSERT
  // check that tlab_size (t1) is still valid
  {
    Label ok;
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
    sll_ptr(t2, LogHeapWordSize, t2);
    cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
    STOP("assert(t1 == tlab_size)");
    should_not_reach_here();

    bind(ok);
  }
#endif // ASSERT
  add(top, t1, top); // t1 is tlab_size
  sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
  st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));

  if (ZeroTLAB) {
    // This is a fast TLAB refill, therefore the GC is not notified of it.
    // So compiled code must fill the new TLAB with zeroes.
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    zero_memory(t2, t1);
  }
  verify_tlab();
  ba(retry);
  delayed()->nop();
}

void MacroAssembler::zero_memory(Register base, Register index) {
  assert_different_registers(base, index);
  Label loop;
  bind(loop);
  subcc(index, HeapWordSize, index);
  brx(Assembler::greaterEqual, true, Assembler::pt, loop);
  delayed()->st_ptr(G0, base, index);
}

void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
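  // The hi22/low10 split, sketched (illustrative only): a 32-bit address A
  // is rebuilt from one sethi plus the memory op's displacement field,
  //   sethi %hi(A), Rtmp1    ! Rtmp1 = A with the low 10 bits cleared
  //   ld    [Rtmp1 + %lo(A)] ! the low 10 bits travel in the simm13 field
  // so no second constant-setting instruction is needed.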
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}


// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Work on a temporary copy of the stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow the yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);

  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
  jump_to(stub, G4_scratch);
  delayed()->restore();

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

static address satb_log_enqueue_with_frame = NULL;
static u_char* satb_log_enqueue_with_frame_end = NULL;

static address satb_log_enqueue_frameless = NULL;
static u_char* satb_log_enqueue_frameless_end = NULL;

static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
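// Roughly what the generated stub does, as a C-level sketch (illustrative
// only; the real queue layout lives in the SATBMarkQueue class):
//   restart:
//   if (queue->_index == 0) {
//     SATBMarkQueueSet::handle_zero_index_for_thread(thread); // refill
//     goto restart;
//   }
//   queue->_index -= oopSize;
//   *(void**)((char*)queue->_buf + queue->_index) = pre_val;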
static void generate_satb_log_enqueue(bool with_frame) {
  BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);

#define __ masm.

  address start = __ pc();
  Register pre_val;

  Label refill, restart;
  if (with_frame) {
    __ save_frame(0);
    pre_val = I0;  // Was O0 before the save.
  } else {
    pre_val = O0;
  }

  int satb_q_index_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_index());

  int satb_q_buf_byte_offset =
    in_bytes(JavaThread::satb_mark_queue_offset() +
             SATBMarkQueue::byte_offset_of_buf());

  assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) &&
         in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t),
         "check sizes in assembly below");

  __ bind(restart);

  // Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t
  // so ld_ptr is appropriate.
  __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
  if (!with_frame) {
    // Use return-from-leaf
    __ retl();
    __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  } else {
    // Not delayed.
    __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
  }
  if (with_frame) {
    __ ret();
    __ delayed()->restore();
  }
  __ bind(refill);

  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &SATBMarkQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L0);
  __ mov(G3_scratch, L1);
  __ mov(G4, L2);
  // We need the value of O0 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O0, L3);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);
  __ call_VM_leaf(L5, handle_zero, G2_thread);
  __ mov(L0, G1_scratch);
  __ mov(L1, G3_scratch);
  __ mov(L2, G4);
  __ mov(L3, O0);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  if (with_frame) {
    satb_log_enqueue_with_frame = start;
    satb_log_enqueue_with_frame_end = __ pc();
  } else {
    satb_log_enqueue_frameless = start;
    satb_log_enqueue_frameless_end = __ pc();
  }

#undef __
}

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Is marking active?
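  // Overall shape of the pre-barrier being emitted, as a sketch (illustrative
  // only, not emitted code):
  //   if (thread->satb_mark_queue().is_active()) {
  //     oop prev = (obj != NULL) ? *field : pre_val;  // previous value
  //     if (prev != NULL) satb_log_enqueue(prev);
  //   }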
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                SATBMarkQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  SATBMarkQueue::byte_offset_of_active()),
         tmp);
  }

  // Skip the barrier if marking is inactive.
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue. In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg. In the first case, do a normal call. In the
  // latter, do a save here and call the frameless version.

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}

static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This gets to assume that o0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

  __ srlx(O0, CardTableModRefBS::card_shift, O0);
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
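  // The filtering above, as a C-level sketch (illustrative only):
  //   jbyte* card = byte_map_base + ((uintptr_t)addr >> card_shift);
  //   if (*card == g1_young_card_val()) return;
  //   StoreLoad_barrier();
  //   if (*card == dirty_card_val()) return;  // re-check after the fence
  //   *card = dirty_card_val();               // then log the card address below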
  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so? Do an experiment...

#undef __

}

void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {

  Label filtered;
  MacroAssembler* post_filter_masm = this;

  if (new_val == G0) return;

  G1SATBCardTableLoggingModRefBS* bs =
    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());

  if (G1RSBarrierRegionFilter) {
    xor3(store_addr, new_val, tmp);
    srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);

    // XXX Should I predict this taken or not? Does it matter?
    cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
  }

  // If the "store_addr" register is an "in" or "local" register, move it to
  // a scratch reg so we can pass it as an argument.
  bool use_scr = !(store_addr->is_global() || store_addr->is_out());
  // Pick a scratch register different from "tmp".
  Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
  // Make sure we use up the delay slot!
  if (use_scr) {
    post_filter_masm->mov(store_addr, scr);
  } else {
    post_filter_masm->nop();
  }
  save_frame(0);
  call(dirty_card_log_enqueue);
  if (use_scr) {
    delayed()->mov(scr, O0);
  } else {
    delayed()->mov(store_addr->after_save(), O0);
  }
  restore();

  bind(filtered);
}

// Called from init_globals() after universe_init() and before interpreter_init()
void g1_barrier_stubs_init() {
  CollectedHeap* heap = Universe::heap();
  if (heap->kind() == CollectedHeap::G1CollectedHeap) {
    // Only needed for G1
    if (dirty_card_log_enqueue == 0) {
      G1SATBCardTableLoggingModRefBS* bs =
        barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
      generate_dirty_card_log_enqueue(bs->byte_map_base);
      assert(dirty_card_log_enqueue != 0, "postcondition.");
    }
    if (satb_log_enqueue_with_frame == 0) {
      generate_satb_log_enqueue(true);
      assert(satb_log_enqueue_with_frame != 0, "postcondition.");
    }
    if (satb_log_enqueue_frameless == 0) {
      generate_satb_log_enqueue(false);
      assert(satb_log_enqueue_frameless != 0, "postcondition.");
    }
  }
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  // If we're writing constant NULL, we can skip the write barrier.
  if (new_val == G0) return;
  CardTableModRefBS* bs =
    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  card_table_write(bs->byte_map_base, tmp, store_addr);
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result) {
  // OopHandle::resolve is an indirection.
  ld_ptr(result, 0, result);
}

void MacroAssembler::load_mirror(Register mirror, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ld_ptr(method, in_bytes(Method::const_offset()), mirror);
  ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
  ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
  ld_ptr(mirror, mirror_offset, mirror);
  resolve_oop_handle(mirror);
}

void MacroAssembler::load_klass(Register src_oop, Register klass) {
  // The number of bytes in this code is used by
  // MachCallDynamicJavaNode::ret_addr_offset()
  // if this changes, change that.
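  // Compressed class pointer load, sketched (assuming compression is enabled):
  //   narrowKlass nk = *(narrowKlass*)((char*)obj + klass_offset);
  //   Klass* k = narrow_klass_base + ((uintptr_t)nk << narrow_klass_shift);
  // decode_klass_not_null() below also covers the zero-base/zero-shift cases.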
  if (UseCompressedClassPointers) {
    lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    decode_klass_not_null(klass);
  } else {
    ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
  }
}

void MacroAssembler::store_klass(Register klass, Register dst_oop) {
  if (UseCompressedClassPointers) {
    assert(dst_oop != klass, "not enough registers");
    encode_klass_not_null(klass);
    st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  } else {
    st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    st(s, d, oopDesc::klass_gap_offset_in_bytes());
  }
}

void MacroAssembler::load_heap_oop(const Address& s, Register d) {
  if (UseCompressedOops) {
    lduw(s, d);
    decode_heap_oop(d);
  } else {
    ld_ptr(s, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
  if (UseCompressedOops) {
    lduw(s1, s2, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, s2, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
  if (UseCompressedOops) {
    lduw(s1, simm13a, d);
    decode_heap_oop(d, d);
  } else {
    ld_ptr(s1, simm13a, d);
  }
}

void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
  else                  load_heap_oop(s1, s2.as_register(), d);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
  if (UseCompressedOops) {
    assert(s1 != d && s2 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, s2);
  } else {
    st_ptr(d, s1, s2);
  }
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
  if (UseCompressedOops) {
    assert(s1 != d, "not enough registers");
    encode_heap_oop(d);
    st(d, s1, simm13a);
  } else {
    st_ptr(d, s1, simm13a);
  }
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
  if (UseCompressedOops) {
    assert(a.base() != d, "not enough registers");
    encode_heap_oop(d);
    st(d, a, offset);
  } else {
    st_ptr(d, a, offset);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // could be moved before the branch, with the delay slot annulled,
    // but that may add some unneeded work decoding null
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(r);
  if (Universe::narrow_oop_base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  verify_oop(src);
  if (Universe::narrow_oop_base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (Universe::narrow_oop_base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
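  // Not-null decode, sketched: oop = (narrow << LogMinObjAlignmentInBytes),
  // plus narrow_oop_base when the heap is not zero-based; the null filtering
  // that decode_heap_oop() performs is deliberately omitted here.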
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (Universe::narrow_oop_base() != NULL)
    add(dst, G6_heapbase, dst);
}

void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    sub(r, G6_heapbase, r);
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      srlx(r, LogKlassAlignmentInBytes, r);
    }
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    srlx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      set((intptr_t)Universe::narrow_klass_base(), dst);
      sub(src, dst, dst);
      if (Universe::narrow_klass_shift() != 0) {
        srlx(dst, LogKlassAlignmentInBytes, dst);
      }
    } else {
      // shift src into dst
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      srlx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1;  // shift src,dst or add
  if (Universe::narrow_klass_base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    if (Universe::narrow_klass_shift() != 0) {
      num_instrs += 1;  // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    if (Universe::narrow_klass_shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    sllx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      if (Universe::narrow_klass_shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)Universe::narrow_klass_base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      sllx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}

#ifdef COMPILER2

// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
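// C-level sketch of the fast path below (illustrative only). Each iteration
// inspects 8 jchars held in two 8-byte words and emits 8 jbytes:
//   if ((w1 | w2) & 0xff00ff00ff00ff00UL) return 0;  // some char > 0xFF
//   pack the low byte of each jchar into one 8-byte word and store it;
//   src += 16; dst += 8; cnt -= 8;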
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result,
                                        Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result);
  assert_different_registers(ftmp1, ftmp2, ftmp3);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->mov(cnt, result); // copy count

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp1);
  andcc(tmp1, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Set mask for bshuffle instruction
  Register mask = tmp4;
  set(0x13579bdf, mask);
  bmask(mask, G0, G0);

  // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
  Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00
  add(mask, 0x300, mask);             // mask = 0x0000 0000 ff00 ff00
  sllx(mask, 32, tmp1);               // tmp1 = 0xff00 ff00 0000 0000
  or3(mask, tmp1, mask);              // mask = 0xff00 ff00 ff00 ff00

  // Load first 8 bytes
  ldx(src, 0, tmp1);

  bind(Lloop);
  // Load next 8 bytes
  ldx(src, 8, tmp2);

  // Check for non-latin1 character by testing if the most significant byte of a char is set.
  // Although we have to move the data between integer and floating point registers, this is
  // still faster than the corresponding VIS instructions (ford/fand/fcmpd).
  or3(tmp1, tmp2, tmp3);
  btst(tmp3, mask);
  // annul zeroing if branch is not taken to preserve original count
  brx(Assembler::notZero, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // 0 - failed

  // Move bytes into float register
  movxtod(tmp1, ftmp1);
  movxtod(tmp2, ftmp2);

  // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
  bshuffle(ftmp1, ftmp2, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  // Increment addresses and decrement count
  inc(src, 16);
  inc(dst, 8);
  dec(cnt, 8);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldx(src, 0, tmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Compress char[] to byte[]. Return 0 on failure.
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) {
  Label Lloop;
  assert_different_registers(src, dst, cnt, tmp, result);

  lduh(src, 0, tmp);

  bind(Lloop);
  inc(src, sizeof(jchar));
  cmp(tmp, 0xff);
  // annul zeroing if branch is not taken to preserve original count
  br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc
  delayed()->mov(G0, result); // 0 - failed
  deccc(cnt);
  stb(tmp, dst, 0);
  inc(dst);
  // annul LDUH if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduh(src, 0, tmp); // hoisted
}

// Inflate byte[] to char[] by inflating 16 bytes at once.
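// C-level sketch of the fast path below (illustrative only): each iteration
// reads 8 jbytes and writes 8 jchars by zero-extending every byte,
//   for (int i = 0; i < 8; i++) dst[i] = (jchar)(src[i] & 0xff);
// where fpmerge against a zeroed register performs the byte/zero interleave.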
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                                       FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp);
  assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp);
  andcc(tmp, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  // Initialize float register to zero
  FloatRegister zerof = ftmp4;
  delayed()->fzero(FloatRegisterImpl::D, zerof);

  // Load first 8 bytes
  ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  bind(Lloop);
  inc(src, 8);
  dec(cnt, 8);

  // Inflate the string by interleaving each byte from the source array
  // with a zero byte and storing the result in the destination array.
  fpmerge(zerof, ftmp1->successor(), ftmp2);
  stf(FloatRegisterImpl::D, ftmp2, dst, 8);
  fpmerge(zerof, ftmp1, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  inc(dst, 16);

  cmp(cnt, 8);
  // annul LDF if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
  Label Loop;
  assert_different_registers(src, dst, cnt, tmp);

  ldub(src, 0, tmp);
  bind(Loop);
  inc(src);
  deccc(cnt);
  sth(tmp, dst, 0);
  inc(dst, sizeof(jchar));
  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Loop);
  delayed()->ldub(src, 0, tmp); // hoisted
}

void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    Register tmp1, Register tmp2,
                                    Register result, int ae) {
  Label Ldone, Lloop;
  assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
  int stride1, stride2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
  // we interchange str1 and str2 in the UL case and negate the result.
  // Like this, str1 is always latin1 encoded, except for the UU case.

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    srl(cnt2, 1, cnt2);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
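  // Semantics being implemented, sketched:
  //   for (i = 0; i < min(len1, len2); i++)
  //     if (a[i] != b[i]) return a[i] - b[i];
  //   return len1 - len2;  // measured in chars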
  Label Lskip;
  Register diff = tmp1;
  subcc(cnt1, cnt2, diff);
  br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  delayed()->mov(cnt2, cnt1);
  bind(Lskip);

  // Rename registers
  Register limit1 = cnt1;
  Register limit2 = limit1;
  Register chr1   = result;
  Register chr2   = cnt2;
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    // We need an additional register to keep track of two limits
    assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result);
    limit2 = tmp2;
  }

  // Is the minimum length zero?
  cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
  br(Assembler::equal, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Load first characters
  if (ae == StrIntrinsicNode::LL) {
    stride1 = stride2 = sizeof(jbyte);
    ldub(str1, 0, chr1);
    ldub(str2, 0, chr2);
  } else if (ae == StrIntrinsicNode::UU) {
    stride1 = stride2 = sizeof(jchar);
    lduh(str1, 0, chr1);
    lduh(str2, 0, chr2);
  } else {
    stride1 = sizeof(jbyte);
    stride2 = sizeof(jchar);
    ldub(str1, 0, chr1);
    lduh(str2, 0, chr2);
  }

  // Compare first characters
  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->nop();

  // Check if the strings start at the same location
  cmp(str1, str2);
  brx(Assembler::equal, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // result is zero

  // We have no guarantee that on 64 bit the higher half of limit is 0
  signx(limit1);

  // Get limit
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    sll(limit1, 1, limit2);
    subcc(limit2, stride2, chr2);
  }
  subcc(limit1, stride1, chr1);
  br(Assembler::zero, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result);  // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Shift str1 and str2 to the end of the arrays, negate limit
  add(str1, limit1, str1);
  add(str2, limit2, str2);
  neg(chr1, limit1);  // limit1 = -(limit1-stride1)
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    neg(chr2, limit2);  // limit2 = -(limit2-stride2)
  }

  // Compare the rest of the characters
  load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  bind(Lloop);
  load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false);

  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->inccc(limit1, stride1);
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    inccc(limit2, stride2);
  }

  // annul the load if the branch is not taken, to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  // If strings are equal up to min length, return the length difference.
  if (ae == StrIntrinsicNode::UU) {
    // Divide by 2 to get number of chars
    sra(diff, 1, result);
  } else {
    mov(diff, result);
  }

  // Otherwise, return the difference between the first mismatched chars.
  bind(Ldone);
  if (ae == StrIntrinsicNode::UL) {
    // Negate result (see note above)
    neg(result);
  }
}

void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
                                  Register limit, Register tmp, Register result, bool is_byte) {
  Label Ldone, Lloop, Lremaining;
  assert_different_registers(ary1, ary2, limit, tmp, result);

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
  assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned");

  if (is_array_equ) {
    // return true if the same array
    cmp(ary1, ary2);
    brx(Assembler::equal, true, Assembler::pn, Ldone);
    delayed()->mov(1, result);  // equal

    br_null(ary1, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    br_null(ary2, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal

    // load the lengths of arrays
    ld(Address(ary1, length_offset), limit);
    ld(Address(ary2, length_offset), tmp);

    // return false if the two arrays are not equal length
    cmp(limit, tmp);
    br(Assembler::notEqual, true, Assembler::pn, Ldone);
    delayed()->clr(result);     // not equal
  }

  cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
  delayed()->mov(1, result); // zero-length arrays are equal

  if (is_array_equ) {
    // load array addresses
    add(ary1, base_offset, ary1);
    add(ary2, base_offset, ary2);
    // set byte count
    if (!is_byte) {
      sll(limit, exact_log2(sizeof(jchar)), limit);
    }
  } else {
    // We have no guarantee that on 64 bit the higher half of limit is 0
    signx(limit);
  }

#ifdef ASSERT
  // Sanity check for doubleword (8-byte) alignment of ary1 and ary2.
  // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()).
  Label Laligned;
  or3(ary1, ary2, tmp);
  andcc(tmp, 7, tmp);
  br_null_short(tmp, Assembler::pn, Laligned);
  STOP("First array element is not 8-byte aligned.");
  should_not_reach_here();
  bind(Laligned);
#endif

  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  // MAIN LOOP
  // Load and compare array elements of size 'byte_width' until the elements are not
  // equal or we reached the end of the arrays. If the size of the arrays is not a
  // multiple of 'byte_width', we simply read over the end of the array, bail out and
  // compare the remaining bytes below by skipping the garbage bytes.
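  // Overshoot handling, sketched: the final pair of 8-byte loads may cover up
  // to 7 bytes past the arrays' ends. On big-endian SPARC those garbage bytes
  // land in the low-order bits of the loaded words, so the tail code shifts
  // both words right by 8 * overshoot bits before the final compare.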
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}

void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // test for negative bytes in input string of a given size
  // result 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2);   //! sethi macro fails to emit optimal
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
  sra(size, 0, size);
  andcc(inp, 0x7, i);                 // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);       // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);                   // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);                     // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);                   // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(size, -8, t5);                  // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);                      // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //   of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);          // annulled so i not clobbered for following
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);                 // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);          // annulled so i not clobbered for following
  ldx(inp, i, t3);                    // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);                     // ** i clobbered at this point
  sll(t4, 3, t4);                     // bits beyond end of string -> t4
  srlx(t3, t4, t3);                   // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}

#endif


// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  assert(cache_line_size > 0, "cache line size should be known for this code");
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note, count uses 64bit in 64 bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
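  // BIS background (a summary, not emitted code): stxa with
  // ASI_ST_BLKINIT_PRIMARY initializes an entire cache line without first
  // fetching it from memory, which is why the BIS loop below has to be
  // followed by a membar before any dependent loads.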
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}

/**
 * Update CRC-32[C] with a byte value according to constants in table.
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
4715 * 4716 * uint32_t crc; 4717 * val = crc_table[(val ^ crc) & 0xFF]; 4718 * crc = val ^ (crc >> 8); 4719 */ 4720 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 4721 xor3(val, crc, val); 4722 and3(val, 0xFF, val); 4723 sllx(val, 2, val); 4724 lduw(table, val, val); 4725 srlx(crc, 8, crc); 4726 xor3(val, crc, crc); 4727 } 4728 4729 // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros 4730 void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) { 4731 srlx(src, 24, dst); 4732 4733 sllx(src, 32+8, tmp); 4734 srlx(tmp, 32+24, tmp); 4735 sllx(tmp, 8, tmp); 4736 or3(dst, tmp, dst); 4737 4738 sllx(src, 32+16, tmp); 4739 srlx(tmp, 32+24, tmp); 4740 sllx(tmp, 16, tmp); 4741 or3(dst, tmp, dst); 4742 4743 sllx(src, 32+24, tmp); 4744 srlx(tmp, 32, tmp); 4745 or3(dst, tmp, dst); 4746 } 4747 4748 void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) { 4749 reverse_bytes_32(src, tmp1, tmp2); 4750 movxtod(tmp1, dst); 4751 } 4752 4753 void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) { 4754 movdtox(src, tmp1); 4755 reverse_bytes_32(tmp1, dst, tmp2); 4756 } 4757 4758 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) { 4759 xmulx(xcrc_hi, xK_hi, xtmp_lo); 4760 xmulxhi(xcrc_hi, xK_hi, xtmp_hi); 4761 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4762 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4763 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4764 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4765 ldxl(buf, G0, xtmp_lo); 4766 inc(buf, 8); 4767 ldxl(buf, G0, xtmp_hi); 4768 inc(buf, 8); 4769 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4770 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4771 } 4772 4773 void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) { 4774 mov(xcrc_lo, xtmp_lo); 4775 mov(xcrc_hi, xtmp_hi); 4776 xmulx(xtmp_hi, xK_hi, xtmp_lo); 4777 xmulxhi(xtmp_hi, xK_hi, xtmp_hi); 4778 xmulxhi(xcrc_lo, xK_lo, xcrc_hi); 4779 xmulx(xcrc_lo, xK_lo, xcrc_lo); 4780 xor3(xcrc_lo, xbuf_lo, xcrc_lo); 4781 xor3(xcrc_hi, xbuf_hi, xcrc_hi); 4782 xor3(xcrc_lo, xtmp_lo, xcrc_lo); 4783 xor3(xcrc_hi, xtmp_hi, xcrc_hi); 4784 } 4785 4786 void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) { 4787 and3(xcrc, 0xFF, tmp); 4788 sllx(tmp, 2, tmp); 4789 lduw(table, tmp, xtmp); 4790 srlx(xcrc, 8, xcrc); 4791 xor3(xtmp, xcrc, xcrc); 4792 } 4793 4794 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 4795 and3(crc, 0xFF, tmp); 4796 srlx(crc, 8, crc); 4797 sllx(tmp, 2, tmp); 4798 lduw(table, tmp, tmp); 4799 xor3(tmp, crc, crc); 4800 } 4801 4802 #define CRC32_TMP_REG_NUM 18 4803 4804 #define CRC32_CONST_64 0x163cd6124 4805 #define CRC32_CONST_96 0x0ccaa009e 4806 #define CRC32_CONST_160 0x1751997d0 4807 #define CRC32_CONST_480 0x1c6e41596 4808 #define CRC32_CONST_544 0x154442bd4 4809 4810 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) { 4811 4812 Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check; 4813 Label L_main_loop_prologue; 4814 Label L_fold_512b, L_fold_512b_loop, L_fold_128b; 4815 Label L_fold_tail, L_fold_tail_loop; 4816 Label L_8byte_fold_loop, L_8byte_fold_check; 4817 4818 const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, 
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1,
                                           I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc); // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff; if so, proceed directly to the cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to an 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look-up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on a 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc); // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold a total of 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 48);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);
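/*
 * Illustrative sketch, not part of the original file: one iteration of the
 * 512-bit fold loop above, in C-like pseudo-code. Four independent 128-bit
 * CRC streams each absorb 16 of the 64 bytes consumed per iteration (each
 * fold_128bit_crc32() call advances buf by 16 internally):
 *
 *   for (int i = 0; i < 4; i++) {
 *     stream[i] = clmul(stream[i].lo, K_544)
 *               ^ clmul(stream[i].hi, K_480)
 *               ^ load128_le(buf + 16*i);
 *   }
 *   len -= 64;
 */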
  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the rest of the data in 128-bit chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute the 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN 128 /* 128 x 8B = 1KB */
#define CHUNK_K1 0x1307a0206 /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2 0x1a0f717c4 /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3 0x0170076fa /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */
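/*
 * Illustrative sketch, not part of the original file: the CHUNK_K constants
 * let the four 1KB chunk CRCs computed below be combined without touching the
 * data again. Carry-less multiplying a CRC state by x^(8*d) mod P(x) advances
 * it past d bytes of zeros, so conceptually:
 *
 *   combined = clmul(crc0, K1)   // advance chunk 0 past chunks 1..3
 *            ^ clmul(crc1, K2)   // advance chunk 1 past chunks 2..3
 *            ^ clmul(crc2, K3)   // advance chunk 2 past chunk 3
 *            ^ tail8;            // last 8 raw bytes of chunk 3
 *   crc = crc32c_8B(crc3_partial, combined);
 *
 * This is a reading of the code below, not a statement from the original
 * source.
 */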
void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);

  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of the lower 32 bits to big endian, and move to the FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of the 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via the stack, and move to the FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
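/*
 * Illustrative sketch, not part of the original file: the serial path below
 * consumes 8 bytes per crc32c instruction, and the 32-byte loop is simply a
 * 4x unroll of the same step. In C-like pseudo-code, with crc32c_8B standing
 * for one application of the hardware instruction:
 *
 *   while (len >= 32) {                          // L_crc32c_x32_loop
 *     for (int i = 0; i < 4; i++) { crc = crc32c_8B(crc, load64(buf)); buf += 8; }
 *     len -= 32;
 *   }
 *   while (len >= 8) {                           // L_crc32c_x8_loop
 *     crc = crc32c_8B(crc, load64(buf)); buf += 8; len -= 8;
 *   }
 */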
  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to the INT side, and reverse the byte order of the lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
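/*
 * Illustrative reference, not part of the original file: a plain byte-at-a-
 * time loop that the head/tail handling above matches, assuming the same
 * 256-entry table of 32-bit little-endian words:
 *
 *   static uint32_t crc32c_bytewise_ref(uint32_t crc, const uint8_t* buf,
 *                                       size_t len, const uint32_t* table) {
 *     for (size_t i = 0; i < len; i++) {
 *       crc = (crc >> 8) ^ table[(crc ^ buf[i]) & 0xFF];
 *     }
 *     return crc;
 *   }
 *
 * Note that kernel_crc32() complements crc on entry and exit (not1), while
 * kernel_crc32c() performs no complement itself.
 */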